| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : List[str] = logging.get_logger(__name__)
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
UpperCamelCase_: Tuple = b.T
UpperCamelCase_: Tuple = np.sum(np.square(UpperCAmelCase__ ) , axis=1 )
UpperCamelCase_: Optional[Any] = np.sum(np.square(UpperCAmelCase__ ) , axis=0 )
UpperCamelCase_: Optional[int] = np.matmul(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: List[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: List[str] = x.reshape(-1 , 3 )
UpperCamelCase_: Union[str, Any] = squared_euclidean_distance(UpperCAmelCase__ , UpperCAmelCase__ )
return np.argmin(UpperCAmelCase__ , axis=1 )
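The two obfuscated helpers above are a pairwise squared-Euclidean-distance computation and a nearest-centroid color quantizer. A minimal readable sketch of the same logic (the descriptive names are mine, not from the dataset):

```python
import numpy as np

def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # pairwise ||a_i - b_j||^2 via the expansion ||a||^2 - 2*a.b + ||b||^2
    b_t = b.T
    a_sq = np.sum(np.square(a), axis=1)    # shape (n,)
    b_sq = np.sum(np.square(b_t), axis=0)  # shape (m,)
    ab = np.matmul(a, b_t)                 # shape (n, m)
    return a_sq[:, None] - 2 * ab + b_sq[None, :]

def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # flatten pixels to (num_pixels, 3) and assign each to its nearest cluster
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
```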
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''pixel_values''']
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: List[str] = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
UpperCamelCase_: str = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = np.array(_lowerCamelCase ) if clusters is not None else None
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: List[Any] = size
UpperCamelCase_: Optional[int] = resample
UpperCamelCase_: str = do_normalize
UpperCamelCase_: str = do_color_quantize
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCamelCase , size=(size['height'], size['width']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , ):
UpperCamelCase_: Optional[Any] = rescale(image=_lowerCamelCase , scale=1 / 1_2_7.5 , data_format=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = image - 1
return image
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Tuple = size if size is not None else self.size
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = resample if resample is not None else self.resample
UpperCamelCase_: Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase_: Dict = clusters if clusters is not None else self.clusters
UpperCamelCase_: Dict = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[int] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase ) for image in images]
if do_color_quantize:
UpperCamelCase_: Any = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase_: Optional[Any] = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase_: Dict = images.shape[0]
UpperCamelCase_: Any = images.reshape(_lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase_: List[Any] = list(_lowerCamelCase )
else:
UpperCamelCase_: int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: str = {'input_ids': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
| 57 |
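The normalize step in the processor above rescales by `1 / 127.5` and subtracts 1, which maps standard 8-bit pixel values from [0, 255] into [-1, 1]; a quick check of that arithmetic:

```python
import numpy as np

pixels = np.array([0, 127.5, 255])
print(pixels / 127.5 - 1)  # [-1.  0.  1.]
```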
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Optional[int] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCamelCase_: Tuple = test_metrics
@require_cpu
def _a ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a ( self ):
self.test_metrics.main()
@require_multi_gpu
def _a ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase_: List[Any] = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() )
| 57 | 1 |
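The last test above shells out to `torchrun` with one process per visible GPU. A minimal sketch of that launch pattern outside the test harness (the script path is illustrative, not taken from the code above):

```python
import os
import subprocess

import torch

script = "test_metrics.py"  # hypothetical path; the test derives it from the installed package
cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", script]

env = os.environ.copy()
env["OMP_NUM_THREADS"] = "1"  # mirrors patch_environment(omp_num_threads=1)
subprocess.run(cmd, env=env, check=True)
```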
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_ ( a_ ):
_SCREAMING_SNAKE_CASE = 42
class A_ ( a_ , a_ ):
@register_to_config
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int = 6_55_36 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : str = "fourier" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __SCREAMING_SNAKE_CASE : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __SCREAMING_SNAKE_CASE : Tuple[str] = "UNetMidBlock1D" , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : Tuple[int] = (32, 32, 64) , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = False , ):
super().__init__()
__a = sample_size
# time
if time_embedding_type == "fourier":
__a = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__SCREAMING_SNAKE_CASE , log=__SCREAMING_SNAKE_CASE , flip_sin_to_cos=__SCREAMING_SNAKE_CASE )
__a = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__a = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__SCREAMING_SNAKE_CASE , downscale_freq_shift=__SCREAMING_SNAKE_CASE )
__a = block_out_channels[0]
if use_timestep_embedding:
__a = block_out_channels[0] * 4
__a = TimestepEmbedding(
in_channels=__SCREAMING_SNAKE_CASE , time_embed_dim=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , out_dim=block_out_channels[0] , )
__a = nn.ModuleList([] )
__a = None
__a = nn.ModuleList([] )
__a = None
# down
__a = in_channels
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__a = output_channel
__a = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__a = i == len(__SCREAMING_SNAKE_CASE ) - 1
__a = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__a = get_mid_block(
__SCREAMING_SNAKE_CASE , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__SCREAMING_SNAKE_CASE , add_downsample=__SCREAMING_SNAKE_CASE , )
# up
__a = list(reversed(__SCREAMING_SNAKE_CASE ) )
__a = reversed_block_out_channels[0]
if out_block_type is None:
__a = out_channels
else:
__a = block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__a = output_channel
__a = (
reversed_block_out_channels[i + 1] if i < len(__SCREAMING_SNAKE_CASE ) - 1 else final_upsample_channels
)
__a = i == len(__SCREAMING_SNAKE_CASE ) - 1
__a = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__a = output_channel
# out
__a = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__a = get_out_block(
out_block_type=__SCREAMING_SNAKE_CASE , num_groups_out=__SCREAMING_SNAKE_CASE , embed_dim=block_out_channels[0] , out_channels=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , fc_dim=block_out_channels[-1] // 4 , )
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, float, int] , __SCREAMING_SNAKE_CASE : bool = True , ):
__a = timestep
if not torch.is_tensor(__SCREAMING_SNAKE_CASE ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(sample.device )
__a = self.time_proj(__SCREAMING_SNAKE_CASE )
if self.config.use_timestep_embedding:
__a = self.time_mlp(__SCREAMING_SNAKE_CASE )
else:
__a = timestep_embed[..., None]
__a = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__a = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__a = ()
for downsample_block in self.down_blocks:
__a , __a = downsample_block(hidden_states=__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__a = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__a = down_block_res_samples[-1:]
__a = down_block_res_samples[:-1]
__a = upsample_block(__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE )
# 5. post-process
if self.out_block:
__a = self.out_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__SCREAMING_SNAKE_CASE )
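The start of the forward pass above normalizes `timestep` before projecting it: a bare Python scalar and a 0-d tensor are both promoted to a 1-d tensor on the sample's device. A readable sketch of just that step (names are mine):

```python
import torch

def promote_timestep(timestep, sample: torch.Tensor) -> torch.Tensor:
    # accept an int/float scalar or a tensor; always return a 1-d tensor on sample's device
    if not torch.is_tensor(timestep):
        timestep = torch.tensor([timestep], dtype=torch.long, device=sample.device)
    elif timestep.ndim == 0:
        timestep = timestep[None].to(sample.device)
    return timestep

sample = torch.zeros(1, 2, 8)
print(promote_timestep(10, sample))                # tensor([10])
print(promote_timestep(torch.tensor(10), sample))  # tensor([10])
```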
| 525 |
def __A ( _A ):
"""simple docstring"""
__a = []
for data in source_data:
for i, el in enumerate(_A ):
if len(_A ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_A ) )
return data_lists
def __A ( _A , _A ):
"""simple docstring"""
__a = []
for dlist, weight in zip(_A , _A ):
__a = min(_A )
__a = max(_A )
__a = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__a = f"""Invalid weight of {weight:f} provided"""
raise ValueError(_A )
score_lists.append(_A )
return score_lists
def __A ( _A ):
"""simple docstring"""
__a = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_A ):
__a = final_scores[j] + ele
return final_scores
def __A ( _A , _A ):
"""simple docstring"""
__a = get_data(_A )
__a = calculate_each_score(_A , _A )
__a = generate_final_scores(_A )
# append scores to source data
for i, ele in enumerate(_A ):
source_data[i].append(_A )
return source_data
| 525 | 1 |
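The scoring helper above min-max normalizes each column and inverts the score when the column's weight is 0 (lower raw values are better). A compact equivalent of that branch logic (my names):

```python
def normalize_column(values: list[float], weight: float) -> list[float]:
    lo, hi = min(values), max(values)
    span = hi - lo
    if weight == 0:
        # lower is better: invert the normalized score; constant columns score 1
        return [1 - (v - lo) / span if span else 1 for v in values]
    if weight == 1:
        # higher is better; constant columns score 0
        return [(v - lo) / span if span else 0 for v in values]
    raise ValueError(f"Invalid weight of {weight:f} provided")

print(normalize_column([1.0, 2.0, 3.0], weight=1))  # [0.0, 0.5, 1.0]
```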
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __lowerCAmelCase : list[int] ) -> list[int]:
if len(__lowerCAmelCase ) == 0:
return array
snake_case , snake_case = min(__lowerCAmelCase ), max(__lowerCAmelCase )
# Compute the variables
snake_case = _max - _min + 1
snake_case , snake_case = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
snake_case = i - _min
snake_case = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
snake_case = 0
for i in range(__lowerCAmelCase ):
while holes_repeat[i] > 0:
snake_case = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = input("Enter numbers separated by comma:\n")
_SCREAMING_SNAKE_CASE = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 369 |
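The sort above is a pigeonhole sort: each value lands in a hole indexed by `value - min`, and a parallel count array handles duplicates. The same algorithm with readable names:

```python
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    holes_range = _max - _min + 1
    holes = [0] * holes_range         # last value stored per hole
    holes_repeat = [0] * holes_range  # how many times each value occurred
    for value in array:
        holes[value - _min] = value
        holes_repeat[value - _min] += 1
    # write values back in order, repeating each as often as it occurred
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    return array

print(pigeon_sort([0, 5, 3, 2, 2]))  # [0, 2, 2, 3, 5]
```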
'''simple docstring'''
from manim import *
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.25 , width=0.25 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""CPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(4 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""GPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Model""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case , *__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
ckpt_arr.append(__snake_case )
snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
snake_case = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__snake_case )
snake_case = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Disk""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(FadeOut(__snake_case ) )
snake_case = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=3 ) )
self.play(
FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , )
self.wait()
| 369 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = "▁"
_A = {"vocab_file": "sentencepiece.bpe.model"}
_A = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
_A = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
_A = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class _lowerCAmelCase ( __lowercase ):
_lowercase =VOCAB_FILES_NAMES
_lowercase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase =PRETRAINED_VOCAB_FILES_MAP
_lowercase =['''input_ids''', '''attention_mask''']
_lowercase =[]
_lowercase =[]
def __init__( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase = None , **_UpperCamelCase , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_A , tgt_lang=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
lowerCAmelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ = 1
lowerCAmelCase_ = len(self.sp_model )
lowerCAmelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
lowerCAmelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase_ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __a ( self ) -> Tuple:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __a ( self ) -> List[Any]:
return self._src_lang
@src_lang.setter
def __a ( self , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Tuple:
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self ) -> Any:
lowerCAmelCase_ = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , _UpperCamelCase ) -> Optional[int]:
return self.sp_model.encode(_A , out_type=_A )
def __a ( self , _UpperCamelCase ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , _UpperCamelCase ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , _UpperCamelCase ) -> List[Any]:
lowerCAmelCase_ = []
lowerCAmelCase_ = ""
lowerCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
lowerCAmelCase_ = True
lowerCAmelCase_ = []
else:
current_sub_tokens.append(_A )
lowerCAmelCase_ = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __a ( self , _UpperCamelCase , _UpperCamelCase = None ) -> int:
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , "wb" ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __a ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
lowerCAmelCase_ = [1] * len(self.prefix_tokens )
lowerCAmelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def __a ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Optional[Any]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase_ = src_lang
lowerCAmelCase_ = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
lowerCAmelCase_ = self.convert_tokens_to_ids(_A )
lowerCAmelCase_ = tgt_lang_id
return inputs
def __a ( self , _UpperCamelCase , _UpperCamelCase = "en_XX" , _UpperCamelCase = None , _UpperCamelCase = "ro_RO" , **_UpperCamelCase , ) -> str:
lowerCAmelCase_ = src_lang
lowerCAmelCase_ = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A )
def __a ( self ) -> Optional[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __a ( self , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = self.lang_code_to_id[src_lang]
lowerCAmelCase_ = [self.cur_lang_code_id]
lowerCAmelCase_ = [self.eos_token_id]
def __a ( self , _UpperCamelCase ) -> Tuple:
lowerCAmelCase_ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase_ = [self.cur_lang_code_id]
lowerCAmelCase_ = [self.eos_token_id]
| 705 |
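The tokenizer above keeps a fixed map for the first four special tokens and shifts every SentencePiece id by a `fairseq_offset` of 1 so the two vocabularies line up. A sketch of the token-to-id path (assumes a loaded SentencePiece model; names are mine):

```python
def token_to_id(token: str, sp_model, fairseq_offset: int = 1) -> int:
    # fixed fairseq-side ids for the special tokens
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = sp_model.PieceToId(token)
    # SentencePiece returns 0 for unknown pieces; map that to the <unk> id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]
```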
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_A = get_logger(__name__)
_A = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _lowerCAmelCase :
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase :
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _lowerCAmelCase ( __a ):
@add_start_docstrings(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> jnp.ndarray:
for processor in self:
lowerCAmelCase_ = inspect.signature(processor.__call__ ).parameters
if len(_UpperCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
lowerCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
else:
lowerCAmelCase_ = processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Tuple:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not (temperature > 0):
raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
lowerCAmelCase_ = temperature
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = scores / self.temperature
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase = -float("Inf" ) , _UpperCamelCase = 1 ) -> Union[str, Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
lowerCAmelCase_ = top_p
lowerCAmelCase_ = filter_value
lowerCAmelCase_ = min_tokens_to_keep
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ = lax.top_k(_UpperCamelCase , scores.shape[-1] )
lowerCAmelCase_ = jnp.full_like(_UpperCamelCase , self.filter_value )
lowerCAmelCase_ = jax.nn.softmax(_UpperCamelCase , axis=-1 ).cumsum(axis=-1 )
lowerCAmelCase_ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCAmelCase_ = jnp.roll(_UpperCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(_UpperCamelCase )
# min tokens to keep
lowerCAmelCase_ = score_mask.at[:, : self.min_tokens_to_keep].set(_UpperCamelCase )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jax.lax.sort_key_val(_UpperCamelCase , _UpperCamelCase )[-1]
return next_scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase = -float("Inf" ) , _UpperCamelCase = 1 ) -> List[Any]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
lowerCAmelCase_ = max(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = filter_value
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ = scores.shape
lowerCAmelCase_ = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCAmelCase_ = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCAmelCase_ , lowerCAmelCase_ = lax.top_k(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.broadcast_to((jnp.arange(_UpperCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCAmelCase_ = topk_scores.flatten()
lowerCAmelCase_ = topk_indices.flatten() + shift
lowerCAmelCase_ = next_scores_flat.at[topk_indices_flat].set(_UpperCamelCase )
lowerCAmelCase_ = next_scores_flat.reshape(_UpperCamelCase , _UpperCamelCase )
return next_scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Any:
lowerCAmelCase_ = bos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = jnp.full(scores.shape , -float("inf" ) )
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = max_length
lowerCAmelCase_ = eos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = jnp.full(scores.shape , -float("inf" ) )
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
lowerCAmelCase_ = min_length
lowerCAmelCase_ = eos_token_id
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
lowerCAmelCase_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = list(_UpperCamelCase )
lowerCAmelCase_ = begin_index
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCAmelCase_ = jnp.where(_UpperCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _UpperCamelCase )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase_ = list(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
lowerCAmelCase_ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase ) -> List[Any]:
lowerCAmelCase_ = dict(_UpperCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCAmelCase_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCAmelCase_ = force_token_array.at[index].set(_UpperCamelCase )
lowerCAmelCase_ = jnp.intaa(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> jnp.ndarray:
def _force_token(_UpperCamelCase ):
lowerCAmelCase_ = scores.shape[0]
lowerCAmelCase_ = self.force_token_array[generation_idx]
lowerCAmelCase_ = jnp.ones_like(_UpperCamelCase , dtype=scores.dtype ) * -float("inf" )
lowerCAmelCase_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCAmelCase_ = lax.dynamic_update_slice(_UpperCamelCase , _UpperCamelCase , (0, current_token) )
return new_scores
lowerCAmelCase_ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_UpperCamelCase ) , lambda: scores , ) , )
return scores
class _lowerCAmelCase ( __a ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
lowerCAmelCase_ = generate_config.eos_token_id
lowerCAmelCase_ = generate_config.no_timestamps_token_id
lowerCAmelCase_ = generate_config.no_timestamps_token_id + 1
lowerCAmelCase_ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_UpperCamelCase , "max_initial_timestamp_index" ):
lowerCAmelCase_ = generate_config.max_initial_timestamp_index
else:
lowerCAmelCase_ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCAmelCase_ = model_config.vocab_size
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCAmelCase_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) >= 1 , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _UpperCamelCase , )
lowerCAmelCase_ = jnp.where((cur_len - self.begin_index) < 2 , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _UpperCamelCase , _UpperCamelCase , )
return jnp.where(
_UpperCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _UpperCamelCase , )
lowerCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(cur_len == self.begin_index , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _UpperCamelCase , )
lowerCAmelCase_ = self.timestamp_begin + self.max_initial_timestamp_index
lowerCAmelCase_ = jnp.where(
_UpperCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _UpperCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCAmelCase_ = jax.nn.log_softmax(_UpperCamelCase , axis=-1 )
def handle_cumulative_probs(_UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCAmelCase_ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _UpperCamelCase , )
lowerCAmelCase_ = jax.vmap(_UpperCamelCase )(_UpperCamelCase , _UpperCamelCase )
return scores
| 279 | 0 |
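The top-p processor above is the trickiest of the group: it sorts the full vocabulary, keeps the smallest prefix whose cumulative probability stays below `top_p` (plus the first token to cross the threshold), then scatters the surviving logits back to their original positions. A self-contained sketch of that masking (names are mine):

```python
import jax
import jax.numpy as jnp
from jax import lax

def top_p_filter(scores, top_p=0.9, filter_value=-float("inf"), min_tokens_to_keep=1):
    # sort every logit descending (top_k over the whole vocabulary)
    topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
    mask_scores = jnp.full_like(scores, filter_value)
    cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
    score_mask = cumulative_probs < top_p
    # shift right so the token that crosses the threshold is kept as well
    score_mask = jnp.roll(score_mask, 1)
    score_mask |= score_mask.at[:, 0].set(True)
    # always keep at least min_tokens_to_keep tokens
    score_mask = score_mask.at[:, :min_tokens_to_keep].set(True)
    topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
    # undo the sort: scatter filtered scores back to vocabulary order
    return lax.sort_key_val(topk_indices, topk_next_scores)[-1]

scores = jnp.log(jnp.array([[0.05, 0.4, 0.05, 0.5]]))
print(top_p_filter(scores, top_p=0.8))  # keeps the 0.5 and 0.4 tokens only
```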
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )
binary_number += "0" * shift_amount
return binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
snake_case_ = str(bin(SCREAMING_SNAKE_CASE__ ) )[2:]
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b0"
snake_case_ = binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
return "0b" + shifted_binary_number
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if number >= 0: # Get binary representation of positive number
snake_case_ = '''0''' + str(bin(SCREAMING_SNAKE_CASE__ ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case_ = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) # Find 2's complement of number
snake_case_ = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
snake_case_ = (
'''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + binary_number
)
if shift_amount >= len(SCREAMING_SNAKE_CASE__ ):
return "0b" + binary_number[0] * len(SCREAMING_SNAKE_CASE__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(SCREAMING_SNAKE_CASE__ ) - shift_amount]
)
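All three shift helpers above share one obfuscated name, so only the last definition would survive at import time; a cleaned-up version of the first (logical left shift) with a sanity check:

```python
def logical_left_shift(number: int, shift_amount: int) -> str:
    # appending zeros to the binary string is the same as number << shift_amount
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    return bin(number) + "0" * shift_amount

print(logical_left_shift(17, 2))  # 0b1000100  (17 << 2 == 68)
```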
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
SCREAMING_SNAKE_CASE_ = 2_99_79_24_58
# Symbols
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = symbols('ct x y z')
def UpperCamelCase__ ( _lowercase : float ) -> float:
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def UpperCamelCase__ ( _lowercase : float ) -> float:
return 1 / sqrt(1 - beta(_lowercase ) ** 2 )
def UpperCamelCase__ ( _lowercase : float ) -> np.ndarray:
return np.array(
[
[gamma(_lowercase ), -gamma(_lowercase ) * beta(_lowercase ), 0, 0],
[-gamma(_lowercase ) * beta(_lowercase ), gamma(_lowercase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def UpperCamelCase__ ( _lowercase : float , _lowercase : np.ndarray | None = None ) -> np.ndarray:
# Ensure event is not empty
if event is None:
__UpperCAmelCase: List[str] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(_lowercase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE_ = transform(29_97_92_45)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE_ = {ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE_ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""") | 523 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowercase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase : int = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
lowercase : Tuple = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
lowercase : str = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ElectraTokenizer
def __init__( self :Optional[Any] , a :List[Any]=None , a :Tuple=None , a :Tuple=True , a :Dict="[UNK]" , a :Optional[int]="[SEP]" , a :str="[PAD]" , a :Union[str, Any]="[CLS]" , a :int="[MASK]" , a :int=True , a :int=None , **a :List[str] , ) -> str:
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
__UpperCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a ) != do_lower_case
or normalizer_state.get("strip_accents" , a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
):
__UpperCamelCase : Union[str, Any] = getattr(a , normalizer_state.pop("type" ) )
__UpperCamelCase : Union[str, Any] = do_lower_case
__UpperCamelCase : Optional[int] = strip_accents
__UpperCamelCase : str = tokenize_chinese_chars
__UpperCamelCase : List[str] = normalizer_class(**a )
__UpperCamelCase : List[str] = do_lower_case
def _lowerCamelCase ( self :Any , a :Any , a :Union[str, Any]=None ) -> Optional[Any]:
__UpperCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self :Optional[int] , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : int = [self.sep_token_id]
__UpperCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self :int , a :str , a :Optional[str] = None ) -> Tuple[str]:
__UpperCamelCase : Optional[Any] = self._tokenizer.model.save(a , name=a )
return tuple(a )
| 94 |
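The two methods at the end of the tokenizer build BERT-style inputs: `[CLS] A [SEP]` for a single sequence, `[CLS] A [SEP] B [SEP]` for a pair, with token type ids 0 over the first segment and 1 over the second. The same layout in plain Python (names mine; the id defaults are illustrative):

```python
def build_inputs(ids_a: list[int], ids_b: list[int] | None = None,
                 cls_id: int = 101, sep_id: int = 102) -> list[int]:
    # cls_id/sep_id defaults are hypothetical, not taken from the tokenizer above
    output = [cls_id] + ids_a + [sep_id]
    if ids_b:
        output += ids_b + [sep_id]
    return output

def token_type_ids(ids_a: list[int], ids_b: list[int] | None = None) -> list[int]:
    first_segment = [0] * (len(ids_a) + 2)  # covers [CLS] A [SEP]
    if ids_b is None:
        return first_segment
    return first_segment + [1] * (len(ids_b) + 1)  # covers B [SEP]
```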
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> None:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Optional[Any] = analyze_text(_lowerCamelCase)
__UpperCamelCase : List[str] = list(" " + ascii_lowercase)
# what is our total sum of probabilities.
__UpperCamelCase : Any = sum(single_char_strings.values())
# one length string
__UpperCamelCase : Optional[Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
__UpperCamelCase : List[Any] = single_char_strings[ch]
__UpperCamelCase : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum):.1f}')
# two len string
__UpperCamelCase : Optional[Any] = sum(two_char_strings.values())
__UpperCamelCase : Tuple = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
__UpperCamelCase : List[str] = cha + cha
if sequence in two_char_strings:
__UpperCamelCase : Optional[Any] = two_char_strings[sequence]
__UpperCamelCase : Any = int(_lowerCamelCase) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase)
# print second entropy
print(F'{round(-1 * my_sec_sum):.1f}')
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> tuple[dict, dict]:
'''simple docstring'''
__UpperCamelCase : Tuple = Counter() # type: ignore
__UpperCamelCase : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase) - 1):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 94 | 1 |
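The first loop in the entropy snippet computes the single-character Shannon entropy H = −Σ p(x)·log₂ p(x); a compact equivalent using a Counter directly:

```python
import math
from collections import Counter

def single_char_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

print(round(single_char_entropy("hello world"), 2))  # 2.85
```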
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowercase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , **__UpperCAmelCase ) -> List[str]:
super().__init__(**__UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , '''vision''' )
self.check_model_type(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Dict:
if "text_queries" in kwargs:
A : Optional[int] = kwargs.pop('''text_queries''' )
if isinstance(__UpperCAmelCase , (str, Image.Image) ):
A : Optional[Any] = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
A : Optional[Any] = image
A : int = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
return results
def snake_case ( self , **__UpperCAmelCase ) -> Tuple:
A : Optional[int] = {}
if "threshold" in kwargs:
A : Optional[int] = kwargs['''threshold''']
if "top_k" in kwargs:
A : int = kwargs['''top_k''']
return {}, {}, postprocess_params
def snake_case ( self , __UpperCAmelCase ) -> List[Any]:
A : str = load_image(inputs['''image'''] )
A : Optional[int] = inputs['''candidate_labels''']
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : Optional[Any] = candidate_labels.split(''',''' )
A : Optional[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(__UpperCAmelCase ):
A : str = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework )
A : Optional[Any] = self.image_processor(__UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(__UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def snake_case ( self , __UpperCAmelCase ) -> Tuple:
A : Optional[int] = model_inputs.pop('''target_size''' )
A : Any = model_inputs.pop('''candidate_label''' )
A : List[Any] = model_inputs.pop('''is_last''' )
A : Dict = self.model(**__UpperCAmelCase )
A : Tuple = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.1 , __UpperCAmelCase=None ) -> str:
A : List[Any] = []
for model_output in model_outputs:
A : Tuple = model_output['''candidate_label''']
A : Any = BaseModelOutput(__UpperCAmelCase )
A : List[Any] = self.image_processor.post_process_object_detection(
outputs=__UpperCAmelCase , threshold=__UpperCAmelCase , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
A : int = outputs['''scores'''][index].item()
A : int = self._get_bounding_box(outputs['''boxes'''][index][0] )
A : int = {'''score''': score, '''label''': label, '''box''': box}
results.append(__UpperCAmelCase )
A : List[str] = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )
if top_k:
A : Any = results[:top_k]
return results
def snake_case ( self , __UpperCAmelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
A , A , A , A : List[str] = box.int().tolist()
A : Any = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 542 |
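The `_get_bounding_box` helper at the end of the pipeline converts a 4-element `[xmin, ymin, xmax, ymax]` tensor into the dict each detection result carries; a standalone sketch:

```python
import torch

def get_bounding_box(box: torch.Tensor) -> dict[str, int]:
    # truncate coordinates to ints and name them the way the pipeline's results do
    xmin, ymin, xmax, ymax = box.int().tolist()
    return {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}

print(get_bounding_box(torch.tensor([10.2, 5.7, 99.9, 80.1])))
# {'xmin': 10, 'ymin': 5, 'xmax': 99, 'ymax': 80}
```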
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Tuple = logging.get_logger(__name__)
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ['''pixel_values''']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PIL.Image.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
A : Any = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
A : Any = get_size_dict(__UpperCAmelCase )
A : List[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
A : List[Any] = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' )
A : Dict = do_resize
A : Tuple = size
A : Union[str, Any] = resample
A : Dict = do_center_crop
A : int = crop_size
A : Union[str, Any] = do_rescale
A : str = rescale_factor
A : Optional[Any] = do_normalize
A : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return resize(
            image , size=(size['''height'''], size['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample=None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
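# Hedged usage sketch for the processor defined above; the expected shape
# follows the default size (256) and crop size (224) set in __init__.
image = PIL.Image.fromarray(np.zeros((3_00, 4_00, 3) , dtype=np.uint8 ) )
image_processor = __lowercase()  # the image processor class defined in this file
batch = image_processor(images=image , return_tensors='''np''' )
print(batch['''pixel_values'''].shape )  # (1, 3, 224, 224)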
| 542 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer ( BaseTransformer ):
    """simple docstring"""
    mode = "token-classification"
    def __init__( self , hparams ):
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module('tasks' )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        """simple docstring"""
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
        """simple docstring"""
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle = False ) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode )
        logger.info('Loading features from cached file %s' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ):
        """Compute validation"""
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        """simple docstring"""
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs] ).mean()
        preds = np.concatenate([x['pred'] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x['target'] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            'val_loss': val_loss_mean,
            'accuracy_score': accuracy_score(out_label_list , preds_list ),
            'precision': precision_score(out_label_list , preds_list ),
            'recall': recall_score(out_label_list , preds_list ),
            'f1': f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret['log'] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        """simple docstring"""
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        """simple docstring"""
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '--task_type' , default='NER' , type=str , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--labels' , default='' , type=str , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
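# Hedged invocation sketch: assumes this script is saved as run_pl_ner.py next
# to lightning_base.py; the generic flags (model/output paths) come from
# add_generic_args and are assumptions here.
# python run_pl_ner.py --data_dir ./data --labels ./labels.txt \
#   --model_name_or_path bert-base-cased --output_dir ./out \
#   --max_seq_length 128 --task_type NER --do_train --do_predict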
| 406 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class StoppingCriteria ( ABC ):
    """simple docstring"""
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """simple docstring"""
        raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class MaxLengthCriteria ( StoppingCriteria ):
    """simple docstring"""
    def __init__( self , max_length , max_position_embeddings = None ):
        """simple docstring"""
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """simple docstring"""
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                'exceptions, performance degradation, or nothing at all.' )
        return is_done
class MaxNewTokensCriteria ( StoppingCriteria ):
    """simple docstring"""
    def __init__( self , start_length , max_new_tokens ):
        """simple docstring"""
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            'with `max_length = start_length + max_new_tokens` instead.' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """simple docstring"""
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria ( StoppingCriteria ):
    """simple docstring"""
    def __init__( self , max_time , initial_timestamp = None ):
        """simple docstring"""
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """simple docstring"""
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList ( list ):
    """simple docstring"""
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """simple docstring"""
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) -> Optional[int]:
        """simple docstring"""
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria ( stopping_criteria, max_length ):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
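# Hedged usage sketch combining the criteria defined above (torch is already
# imported at the top of this file).
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20 ), MaxTimeCriteria(max_time=5.0 )] )
example_input_ids = torch.ones((1, 20) , dtype=torch.long )
example_scores = torch.zeros((1, 100) )
print(criteria(example_input_ids , example_scores ) )  # True: the length criterion is met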
| 406 | 1 |
'''simple docstring'''
import re
def A_( dna : str):
    if len(re.findall('[ATCG]' , dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG' , 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
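# Quick check of the complement helper above (A<->T and C<->G are swapped).
assert A_('ATCG') == 'TAGC'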
| 3 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
def get_config (model_name ):
    """simple docstring"""
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = """std_conv""" if """bit""" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_0_0_0 , id2label=id2label , label2id=label2id , )
    return config
def rename_key (name ):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "head.fc" in name:
        name = name.replace("""head.fc""" , """classifier.1""" )
    if name.startswith("""norm""" ):
        name = """bit.""" + name
    if "bit" not in name and "classifier" not in name:
        name = """bit.encoder.""" + name
    return name
def prepare_img ():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint (model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if """head""" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("""Logits:""" , logits[0, :3] )
    print("""Predicted class:""" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model {model_name} and processor to the hub' )
        model.push_to_hub(F'ybelkada/{model_name}' )
        processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
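# Hedged CLI sketch; the script filename is an assumption, the flags follow the
# parser defined above.
# python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#   --pytorch_dump_folder_path ./bit-50 --push_to_hub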
| 556 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root: Node | None ):
    '''simple docstring'''
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root: Node | None ):
    '''simple docstring'''
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root: Node | None ):
    '''simple docstring'''
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root: Node | None ):
    '''simple docstring'''
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order( root: Node | None ):
    '''simple docstring'''
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root: Node | None , level: int ):
    '''simple docstring'''
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left( root: Node | None , level: int ):
    '''simple docstring'''
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag( root: Node | None ):
    '''simple docstring'''
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main():  # Main function for testing.
    '''simple docstring'''
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , """\n""" )
    print(f"""Height of Tree: {height(tree )}""" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
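# Worked example on the sample tree built by make_tree() above
# (1 at the root, 2 and 3 as children, 4 and 5 under 2):
example_tree = make_tree()
assert preorder(example_tree ) == [1, 2, 4, 5, 3]
assert inorder(example_tree ) == [4, 2, 5, 1, 3]
assert postorder(example_tree ) == [4, 5, 2, 3, 1]
assert zigzag(example_tree ) == [[1], [3, 2], [4, 5]]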
| 481 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys ( config ,base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict ,config ,base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key ( dct ,old ,new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint ( deit_name ,pytorch_dump_folder_path ):
    '''simple docstring'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_0_0_0
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 1_9_2
        config.intermediate_size = 7_6_8
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    # load original model from timm
    timm_model = timm.create_model(deit_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config ,base_model )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_5_6 / 2_2_4) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size ,crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() ,return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits ,outputs.logits ,atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
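# Hedged illustration of the fused-qkv split performed in read_in_q_k_v above:
# timm stores one (3*hidden, hidden) projection, HF stores separate q/k/v
# matrices; the hidden size below is a toy value.
hidden = 4
qkv = torch.arange(3 * hidden * hidden , dtype=torch.float32 ).reshape(3 * hidden , hidden )
q , k , v = qkv[:hidden] , qkv[hidden : 2 * hidden] , qkv[-hidden:]
assert torch.equal(torch.cat([q, k, v] ) , qkv )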
| 481 | 1 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH , TERMINAL_HEIGHT = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class Direction ( enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite ( content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor ( content , color , end="" ):
    forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , end )
def reset_cursor ( ):
    forceWrite('\r' )
def move_cursor ( num_lines , direction ):
    forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def clear_line ( ):
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak ( ):
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
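# Hedged usage sketch (assumes an ANSI-capable terminal): overwrite a status
# line in place, then draw a separator.
forceWrite('working...' )
reset_cursor()
forceWrite('done!      ' )
linebreak()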
| 368 |
'''simple docstring'''
from math import loga
def get_lowest_set_bit_index ( a: int ) ->int:
    if not isinstance(a , int ):
        raise TypeError('Input value must be a \'int\' type' )
    elif a < 0:
        raise ValueError('Input value must be a positive integer' )
    return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
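# Worked example: 12 = 0b1100, so 12 & -12 == 0b100 == 4 and log2(4) == 2.
assert get_lowest_set_bit_index(12) == 2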
| 368 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class lowerCamelCase ( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
    def test_batch_feature( self ):
pass
def UpperCAmelCase__ ( self : int ) -> List[Any]:
# Initialize image_processor
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
__magic_name__ : Tuple = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processor
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__magic_name__ : List[Any] = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
# Initialize image_processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
__magic_name__ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__magic_name__ : List[Any] = image_processor(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 703 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
    def __init__( self , initial_learning_rate: float , decay_schedule_fn: Callable , warmup_steps: int , power: float = 1.0 , name: str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer ( init_lr: float ,num_train_steps: int ,num_warmup_steps: int ,min_lr_ratio: float = 0.0 ,adam_beta1: float = 0.9 ,adam_beta2: float = 0.999 ,adam_epsilon: float = 1e-8 ,adam_clipnorm: Optional[float] = None ,adam_global_clipnorm: Optional[float] = None ,weight_decay_rate: float = 0.0 ,power: float = 1.0 ,include_in_weight_decay: Optional[List[str]] = None ,):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=power ,)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr ,decay_schedule_fn=lr_schedule ,warmup_steps=num_warmup_steps ,)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule ,weight_decay_rate=weight_decay_rate ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] ,include_in_weight_decay=include_in_weight_decay ,)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay ( Adam ):
    '''simple docstring'''
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config( cls , config ):
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['''weight_decay_rate'''] = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator ( object ):
    '''simple docstring'''
    def __init__( self ):
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
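# Hedged usage sketch for create_optimizer above; hyperparameter values are
# illustrative only.
optimizer , lr_schedule = create_optimizer(
    init_lr=5e-5 ,
    num_train_steps=1_000 ,
    num_warmup_steps=100 ,
    weight_decay_rate=0.01 ,
)
print(type(optimizer ).__name__ )  # AdamWeightDecay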
| 501 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : int ) ->Union[str, Any]:
return self.config.num_train_timesteps | 39 |
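# Hedged sampling sketch for the VP-SDE scheduler above; `score` below is a
# stand-in for a learned score model's output at (x, t), not a real model.
scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=10 )
x = torch.randn(1 , 3 , 8 , 8 )
for t in scheduler.timesteps:
    score = -x  # hypothetical score estimate
    x , x_mean = scheduler.step_pred(score , x , t * torch.ones(x.shape[0] ) )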
def bead_sort (sequence ):
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 39 | 1 |
"""simple docstring"""
def least_divisible_repunit ( divisor : int ):
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution ( limit : int = 1_00_00_00 ):
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 714 |
"""simple docstring"""
def partition ( m : int ):
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1, m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowerCAmelCase__ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowerCAmelCase__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
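# Worked check: the 7 partitions of 5 are 5, 4+1, 3+2, 3+1+1, 2+2+1,
# 2+1+1+1 and 1+1+1+1+1.
assert partition(5) == 7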
| 598 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase : Any = TypeVar("T")
__lowerCAmelCase : Dict = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase : Optional[Any] = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase : Tuple = Union[str, bytes, os.PathLike]
| 509 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read ( bpayload: bytes ,sampling_rate: int ) -> np.array:
    """simple docstring"""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        with subprocess.Popen(ffmpeg_command ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes ,np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping chunks ready for chunked inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
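
# A usage sketch added for illustration (it assumes a working microphone and an
# ffmpeg binary on PATH, and is not part of the original module):
#
#     for chunk in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
#         if not chunk["partial"]:
#             break  # chunk["raw"] now holds a full 5 s float32 window at 16 kHz
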
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, overlapped by
    `stride` bytes on each side. With `stream=True`, partial results are yielded before a
    full `chunk_len` has accumulated.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
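
# Illustrative trace of the striding above (added for clarity, not in the original):
# with chunk_len=8 and stride=(2, 2), feeding 16 bytes yields
#   {"stride": (0, 2)} with 8 bytes, {"stride": (2, 2)} with 8 bytes,
#   {"stride": (2, 2)} with 8 bytes, and a final {"stride": (2, 0)} with 4 bytes,
# i.e. consecutive chunks overlap by stride_left + stride_right = 4 bytes:
#
#     for chunk in chunk_bytes_iter(iter([bytes(range(16))]), 8, stride=(2, 2)):
#         print(chunk["stride"], len(chunk["raw"]))
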
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper that yields `buflen`-byte reads from a running ffmpeg process."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
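
# A minimal runnable sketch (added for illustration): "sample.mp3" is an assumed
# local file, and ffmpeg must be on PATH.
if __name__ == "__main__":
    with open("sample.mp3", "rb") as f:
        decoded = ffmpeg_read(f.read(), sampling_rate=16_000)
    # `decoded` is a mono float32 numpy array resampled to 16 kHz.
    print(decoded.shape, decoded.dtype)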
| 14 | 0 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        """Complete the prompt(s) given as inputs."""
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
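
# A minimal usage sketch (added for illustration; "gpt2" is just an example checkpoint):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(outputs[0]["generated_text"])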
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
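
# Illustrative call with aggregation disabled (added for clarity): instead of a
# bootstrap-aggregated summary, per-example scores are returned as lists.
#
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(
#         predictions=["hello there"], references=["hello there"], use_aggregator=False
#     )
#     print(results["rouge1"])  # list with one Score(precision, recall, fmeasure)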
| 33 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue with a fixed capacity, backed by a doubly linked ring of nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
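
    # A short usage sketch (added for illustration): FIFO behaviour on a ring of
    # capacity 3.
    queue = CircularQueueLinkedList(3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b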
| 164 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )
torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1
        )
torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2"
        )
# regular denoising components
torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )
torch.manual_seed(0 )
        vae = AutoencoderKL()
a : int = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
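
# Illustrative invocation (the test file path is an assumption about where this suite
# lives in the diffusers repository):
#
#     pytest tests/pipelines/stable_unclip/test_stable_unclip.py -k StableUnCLIP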
| 31 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
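
# A minimal usage sketch (added for illustration; "albert-base-v2" is one of the
# public checkpoints listed above):
#
#     tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     encoded = tokenizer("Hello world", "Second segment")
#     print(encoded["input_ids"])
#     print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second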
| 31 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
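
# A minimal usage sketch (added for illustration):
#
#     from transformers import VisualBertConfig, VisualBertModel
#
#     config = VisualBertConfig(visual_embedding_dim=1024)
#     model = VisualBertModel(config)  # randomly initialized with this configuration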
| 178 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    # manim requires the scene body to live in a method named `construct`
    def construct(self):
snake_case : List[str] = Rectangle(height=0.5 , width=0.5 )
snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case : Tuple = [mem.copy() for i in range(6 )]
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Tuple = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[Any] = Text("CPU" , font_size=24 )
snake_case : Optional[int] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
snake_case : Optional[Any] = [mem.copy() for i in range(1 )]
snake_case : List[str] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Optional[int] = Text("GPU" , font_size=24 )
snake_case : Tuple = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase__ )
snake_case : Any = [mem.copy() for i in range(6 )]
snake_case : Any = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
snake_case : Dict = Text("Model" , font_size=24 )
snake_case : Optional[Any] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
snake_case : Optional[Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
snake_case : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case : Any = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.add(UpperCamelCase__ )
snake_case : Optional[Any] = []
snake_case : Dict = []
snake_case : Union[str, Any] = []
for i, rect in enumerate(UpperCamelCase__ ):
snake_case : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase__ )
cpu_target.generate_target()
snake_case : Optional[int] = 0.46 / 4
snake_case : Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
cpu_targs.append(UpperCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 178 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Shallow copy of a module that exposes (a subset of) its attributes for patching."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".

        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
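
# A usage sketch (added for illustration; `my_module` and `fake_join` are hypothetical
# names): temporarily patch `os.path.join` as seen from `my_module` only.
#
#     import my_module
#
#     def fake_join(*parts):
#         return "/".join(parts)
#
#     with patch_submodule(my_module, "os.path.join", fake_join):
#         ...  # code under the `with` sees the patched join via my_module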
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""


DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
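
# Typical invocations from the repository root (added for illustration):
#
#     python utils/check_dummies.py                      # fail if dummy files are stale
#     python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files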
| 20 | 1 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | "
            )

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
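
# Worked example (added for illustration): for the postfix input "5 6 9 * +",
# the evaluation pops 9 and 6 to push 6 * 9 = 54, then pops 54 and 5 to push
# 5 + 54 = 59, so solve(["5", "6", "9", "*", "+"]) returns 59.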
| 562 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
@register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0]
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
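
# A minimal usage sketch (added for illustration; the shapes are assumptions and the
# default block types constrain which channel configurations are valid):
#
#     model = UNet1DModel(block_out_channels=(32, 32, 64), in_channels=2, out_channels=2)
#     noisy_sample = torch.randn(1, 2, 256)
#     denoised = model(noisy_sample, timestep=10).sample  # same shape as the input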
| 562 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (0, 0) to the bottom-right corner, avoiding 1-cells."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
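
    # A small usage sketch (added for illustration): with the middle row walled off
    # except for its last cell, exactly one simple path remains.
    maze = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # prints 1
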
    doctest.testmod()
 | 678 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
__magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(__magic_name__)  # `__magic_name__` holds the README body assembled above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
 | 678 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
return match_score
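# Usage sketch (illustrative, not part of the original module): a high match
# score means the message's letter-frequency ranking resembles English.
#   >>> english_freq_match_score("The quick brown fox jumps over the lazy dog")
#   # -> an int in [0, 12]; genuine English text tends to score high, while
#   # uniformly random letters tend to score low.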
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
from math import log2
def SCREAMING_SNAKE_CASE__ ( a ):
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(a , int ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (a == 0) else int(log2(a & -a ) )
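# Quick sanity check (illustrative): `a & -a` isolates the lowest set bit, so
# log2 of that value is the bit's zero-based index.
#   SCREAMING_SNAKE_CASE__(36)  # -> 2, since 36 = 0b100100
#   SCREAMING_SNAKE_CASE__(16)  # -> 4, since 16 = 0b10000
#   SCREAMING_SNAKE_CASE__(0)   # -> 0 by this implementation's convention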
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self):
        logits = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1E-1_2)
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx)
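# For reference, a simplified 1-D NumPy sketch of the top-k/top-p filtering the
# test above exercises (illustrative only -- the real tf_top_k_top_p_filtering
# also handles batching, `min_tokens_to_keep`, and exact tie-breaking):
def _sketch_top_k_top_p(logits, top_k, top_p):
    out = np.asarray(logits, dtype=np.float64).copy()
    kth_largest = np.sort(out)[-top_k]
    out[out < kth_largest] = -np.inf  # top-k: drop everything below the k-th highest
    order = np.argsort(out)[::-1]  # indices sorted by descending logit
    probs = np.exp(out[order] - np.max(out))
    probs /= probs.sum()
    cum = np.cumsum(probs)
    keep = np.searchsorted(cum, top_p) + 1  # smallest prefix whose mass reaches top_p
    out[order[keep:]] = -np.inf
    return out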
@require_tf
class snake_case_ ( unittest.TestCase , GenerationIntegrationTestsMixin ):
"""simple docstring"""
if is_tf_available():
        framework_dependent_parameters = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def __UpperCAmelCase ( self):
# TF-only test: tf.saved_model export
lowerCamelCase__ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__ = 2
lowerCamelCase__ = 2
        class DummyModel ( tf.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase):
super(_lowerCamelCase , self).__init__()
lowerCamelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
) , jit_compile=_lowerCamelCase , )
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase):
lowerCamelCase__ = self.model.generate(
input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , max_new_tokens=_lowerCamelCase , return_dict_in_generate=_lowerCamelCase , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__ = [[2, 0], [1_02, 1_03]]
lowerCamelCase__ = [[1, 0], [1, 1]]
lowerCamelCase__ = DummyModel(model=_lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCamelCase , _lowerCamelCase , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__ = tf.saved_model.load(_lowerCamelCase).signatures["serving_default"]
for batch_size in range(1 , len(_lowerCamelCase) + 1):
lowerCamelCase__ = {
"input_ids": tf.constant(dummy_input_ids[:batch_size]),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
}
lowerCamelCase__ = serving_func(**_lowerCamelCase)["sequences"]
lowerCamelCase__ = test_model.generate(**_lowerCamelCase , max_new_tokens=_lowerCamelCase)
tf.debugging.assert_equal(_lowerCamelCase , _lowerCamelCase)
@slow
def __UpperCAmelCase ( self):
# TF-only test: tf.saved_model export
lowerCamelCase__ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__ = 1
lowerCamelCase__ = 2
        class DummyModel ( tf.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase):
super(_lowerCamelCase , self).__init__()
lowerCamelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids"),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"),
) , jit_compile=_lowerCamelCase , )
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase):
lowerCamelCase__ = self.model.generate(
input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , max_new_tokens=_lowerCamelCase , return_dict_in_generate=_lowerCamelCase , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__ = [[2], [1_02, 1_03]]
lowerCamelCase__ = [[1], [1, 1]]
lowerCamelCase__ = DummyModel(model=_lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowerCamelCase , _lowerCamelCase , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__ = tf.saved_model.load(_lowerCamelCase).signatures["serving_default"]
for input_row in range(len(_lowerCamelCase)):
lowerCamelCase__ = {
"input_ids": tf.constant([dummy_input_ids[input_row]]),
"attention_mask": tf.constant([dummy_attention_masks[input_row]]),
}
lowerCamelCase__ = serving_func(**_lowerCamelCase)["sequences"]
lowerCamelCase__ = test_model.generate(**_lowerCamelCase , max_new_tokens=_lowerCamelCase)
tf.debugging.assert_equal(_lowerCamelCase , _lowerCamelCase)
@slow
@require_tensorflow_text
def __UpperCAmelCase ( self):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_lowerCamelCase)
            class CompleteSentenceTransformer ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self):
super().__init__()
lowerCamelCase__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowerCamelCase , "spiece.model") , "rb").read())
lowerCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def __UpperCAmelCase ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase):
lowerCamelCase__ = self.tokenizer.tokenize(_lowerCamelCase)
lowerCamelCase__ , lowerCamelCase__ = text.pad_model_inputs(
_lowerCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id)
lowerCamelCase__ = self.model.generate(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase)
return self.tokenizer.detokenize(_lowerCamelCase)
lowerCamelCase__ = CompleteSentenceTransformer()
lowerCamelCase__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs")
lowerCamelCase__ = complete_model(_lowerCamelCase)
lowerCamelCase__ = tf.keras.Model(_lowerCamelCase , _lowerCamelCase)
keras_model.save(_lowerCamelCase)
def __UpperCAmelCase ( self):
# Has PT equivalent: this test relies on random sampling
lowerCamelCase__ = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
lowerCamelCase__ = 14
lowerCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__ = "Hello, my dog is cute and"
lowerCamelCase__ = tokenizer(_lowerCamelCase , return_tensors="tf")
lowerCamelCase__ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__ = model.generate(**_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
lowerCamelCase__ = [6_38, 1_98]
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__ = model.generate(**_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
def __UpperCAmelCase ( self):
# Has PT equivalent: ample use of framework-specific code
lowerCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__ = "Hugging Face is a technology company based in New York and Paris."
lowerCamelCase__ = bart_tokenizer(_lowerCamelCase , return_tensors="tf").input_ids
lowerCamelCase__ = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__ = bart_model.generate(_lowerCamelCase).numpy()
        class FakeBart ( TFBartForConditionalGeneration ):
"""simple docstring"""
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase=None , **UpperCamelCase):
return super().call(_lowerCamelCase , **_lowerCamelCase)
lowerCamelCase__ = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__ = bart_model.generate(_lowerCamelCase , foo="bar").numpy()
self.assertTrue(np.array_equal(_lowerCamelCase , _lowerCamelCase))
        class FakeEncoder ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def __UpperCAmelCase ( self , UpperCamelCase , **UpperCamelCase):
return super().call(_lowerCamelCase , **_lowerCamelCase)
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCamelCase__ = bart_model.generate(_lowerCamelCase).numpy()
with self.assertRaises(_lowerCamelCase):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_lowerCamelCase , foo="bar")
| 701 |
'''simple docstring'''
lowerCAmelCase_ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ):
    '''simple docstring'''
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
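# Worked trace (illustrative) for "(5 + ((4 * 2) * (2 + 3)))" -- note the parser
# is per-character, so only single-digit operands are supported:
#   push 5; push "+"; push 4; push "*"; push 2; ")" -> 4 * 2 = 8
#   push "*"; push 2; push "+"; push 3; ")" -> 2 + 3 = 5
#   ")" -> 8 * 5 = 40; final ")" -> 5 + 40 = 45, which peek() returns.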
if __name__ == "__main__":
lowerCAmelCase_ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 426 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df ,partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
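    # Worked numbers for the assertion above: 100 rows * 8 bytes = 800 bytes,
    # and 800 / max_shard_size(16) = 50 partitions of 2 rows each.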
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df ,partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df ,partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df ,[2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 ,num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df ,[0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 ,num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df ,[1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowercase ( ):
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 29 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__( self , activation_dropout = 0.1 , activation_function = "gelu" , vocab_size = 30522 , hidden_size = 1024 , encoder_ffn_dim = 4096 , num_encoder_layers = 12 , num_encoder_attention_heads = 16 , decoder_ffn_dim = 4096 , num_decoder_layers = 12 , num_decoder_attention_heads = 16 , attention_dropout = 0.1 , dropout = 0.1 , max_position_embeddings = 512 , init_std = 0.02 , is_encoder_decoder = True , add_cross_attention = True , decoder_start_token_id = 0 , ngram = 2 , num_buckets = 32 , relative_max_distance = 128 , disable_ngram_loss = False , eps = 0.0 , use_cache = True , pad_token_id = 0 , bos_token_id = 1 , eos_token_id = 2 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
    def num_hidden_layers( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers( self , value ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 42 | 0 |
def or_gate( input_a : int , input_b : int ):
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ):
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 649 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) ) | 649 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_0522, type=int)
lowerCamelCase__ = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
lowerCamelCase__ = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
lowerCamelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCamelCase__ = [0] * args.vocab_size
for k, v in counter.items():
lowerCamelCase__ = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 612 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase ( ProcessorMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : Tuple = """FlavaImageProcessor"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> int:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> Any:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Tuple:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ) -> Union[str, Any]:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> str:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
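# Usage sketch (illustrative; `FlavaProcessor` and the checkpoint id are the
# upstream names this row corresponds to, not identifiers defined here):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # text-only, image-only, and joint text+image calls are all supported by
#   # __call__ above; the joint call merges tokenizer and image-processor outputs.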
| 247 | 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'
@require_torch
class UpperCamelCase__( TestCasePlus ):
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , )-> Tuple:
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] , float )
            assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a__( self : str )-> List[Any]:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False )
@require_apex
@require_torch_gpu
def a__( self : str )-> List[str]:
"""simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def a__( self : List[str] , experiment_id : List[str] )-> Dict:
"""simple docstring"""
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
        n_matches = len(re.findall(info_string , cl.err ) )
        self.assertEqual(n_matches , data['''n_matches'''] )
@slow
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
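        # Worked check of the figures above (assumed numbers, not measurements):
        # 25e6 quantized params * (8 - 2) bytes saved / 2**20 bytes-per-MB ~ 143MB,
        # so demanding at least 120MB leaves headroom for per-GPU variance.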
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
        self.assertEqual(
            loss_orig , loss_bnb , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
    def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3E-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , )-> List[str]:
        """simple docstring"""
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F"""\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n """.split()
        args_eval = F"""\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps )}\n """.split()
        args_predict = '''
    --do_predict
    '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F"""\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
main()
return output_dir
| 708 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( ProcessorMixin ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self : List[str] , image_processor=None , tokenizer=None , **kwargs : Optional[int] )-> List[Any]:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : str , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs : List[str] )-> List[Any]:
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''' )
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
    def batch_decode( self : List[str] , *args : str , **kwargs : Optional[int] )-> Union[str, Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Dict , *args : Tuple , **kwargs : List[str] )-> Optional[Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self : List[Any] )-> Optional[Any]:
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : Any )-> List[Any]:
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 50 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
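# --- Added illustrative timing check (a sketch, not part of the original script);
# it reuses the `pipe`, `prompt` and `generate_kwargs` names defined above.
# import time
# start = time.time()
# with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#     _ = pipe(prompt, **generate_kwargs).images[0]
# print(f"bf16 inference took {time.time() - start:.1f}s")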
| 673 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
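# --- Added note (a sketch; the test file path is an assumption) ---
# The fast tests above can typically be run in isolation with something like:
#   pytest tests/pipelines/text_to_video/ -k "VideoToVideoSDPipelineFastTests"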
| 673 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
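# --- Added sanity-check sketch (not part of the original script); names follow this script.
# reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
# reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# out = reloaded(**reloaded_tok(["Making tiny model"], return_tensors="pt"))
# assert out.logits is not None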
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
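# --- Added example invocation (a sketch; the script filename and paths are placeholders) ---
# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin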
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()
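# --- Added example invocation (a sketch; flag names follow TensorFlowBenchmarkArguments
# and may vary between library versions) ---
# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128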
if __name__ == "__main__":
main() | 90 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into raw bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
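# --- Added round-trip check illustrating the helpers above (not in the original) ---
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"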
if __name__ == "__main__":
import doctest
doctest.testmod()
| 464 | 0 |
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Returns the sum of all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # upper bound: any 8-digit number exceeds 7 * 9!
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
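# --- Added illustration (not in the original): 145 is a "curious" number since
# 1! + 4! + 5! = 1 + 24 + 120 = 145; the only other one is 40585, so solution()
# is expected to return 145 + 40585 = 40730.
assert sum_of_digit_factorial(145) == 145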
if __name__ == "__main__":
print(f"""{solution() = }""")
| 265 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 265 | 1 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text using pseudo-random numbers as a one-time pad."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt the cipher using the matching pseudo-random key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
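# --- Added note (a sketch, not in the original) ---
# `random` is not cryptographically secure; a hardened variant would draw keys
# from the stdlib `secrets` module instead, e.g.:
#     import secrets
#     k = secrets.randbelow(300) + 1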
| 556 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 556 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers that cannot be written as
    the sum of a prime and twice a square (Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to Goldbach's other conjecture."""
    return compute_nums(1)[0]
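# --- Added illustration (not in the original) ---
# The smallest counterexample to the conjecture is the well-known 5777, i.e.:
#     assert compute_nums(1) == [5777]
#     assert solution() == 5777
# (kept commented out so importing this module stays cheap)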
if __name__ == "__main__":
print(f'''{solution() = }''')
| 481 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 481 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowercase = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
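    # --- Added usage sketch (not part of the original file); names follow this module ---
    # config = UperNetConfig(num_labels=150)   # falls back to the default ResNet backbone
    # d = config.to_dict()
    # assert d["model_type"] == "upernet" and "backbone_config" in d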
| 272 |
"""simple docstring"""
def __a ( a = 6_0_0_8_5_1_4_7_5_1_4_3 ):
"""simple docstring"""
try:
_a = int(a )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_a = 2
_a = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a = i
while n % i == 0:
_a = n // i
i += 1
return int(a )
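# --- Added quick self-checks (not in the original); these are the familiar
# Project Euler #3 values: 13195 = 5 * 7 * 13 * 29, and the default n has
# largest prime factor 6857.
assert solution(13195) == 29
assert solution() == 6857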
if __name__ == "__main__":
print(f'{solution() = }')
| 388 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__( self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 712 | def pancake_sort(arr):
    """Sort a list using the pancake sort algorithm (prefix reversals only)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
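# --- Added quick self-check (not in the original) ---
assert pancake_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]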
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 246 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 35 |
def valid_coloring(neighbours, colored_vertices, color):
    # A color is valid if no already-colored neighbour uses the same color
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
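# Hedged usage sketch (added; the adjacency matrix below is illustrative,
# not from the original file):
if __name__ == "__main__":
    # Triangle graph: every vertex is adjacent to the other two.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # a valid 3-coloring, e.g. [0, 1, 2]
    print(color(triangle, 2))  # no 2-coloring of a triangle exists -> []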
| 304 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 713 |
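# Note (added): this file follows the standard transformers lazy-import layout.
# Under TYPE_CHECKING the symbols are imported eagerly so static tools can
# resolve them; at runtime the module object is swapped for a _LazyModule that
# only imports a submodule when one of its attributes is first accessed, e.g.
#   from transformers.models.ibert import IBertModel  # triggers the lazy load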
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ info logs
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
    import torch
    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)
try:
    import deepspeed
    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)
try:
    import tensorflow as tf
    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None) | 178 | 0 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") | 8 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
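    # Added note: with the default {"shortest_edge": 224}, the branch above
    # rescales the shorter image side to int(256 / 224 * 224) = 256 pixels;
    # the subsequent 224x224 center crop then trims the borders -- the classic
    # "resize to 256, crop to 224" ImageNet evaluation recipe.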
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
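# Hedged usage sketch (added as a comment; assumes the class name restored
# above is correct and that Pillow is installed):
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(images=Image.open("cat.jpg"), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the defaults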
| 461 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    # Convert a byte count to (integer) megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
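# Hedged usage sketch (added): TorchTracemalloc brackets a code region and
# records CUDA memory deltas in MB, e.g.
#   with TorchTracemalloc() as tracemalloc:
#       run_training_step()   # hypothetical workload
#   print(tracemalloc.used, tracemalloc.peaked)  # MB consumed / MB peak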
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument("--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument("--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument("--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.")
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 315 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = '\n    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n    Args:\n        title_sep (`str`, *optional*, defaults to `" / "`):\n            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n        doc_sep (`str`, *optional*, defaults to `" // "`):\n            Separator inserted between the text of the retrieved document and the original input when calling\n            [`RagRetriever`].\n        n_docs (`int`, *optional*, defaults to 5):\n            Number of documents to retrieve.\n        max_combined_length (`int`, *optional*, defaults to 300):\n            Max length of contextualized input returned by [`~RagRetriever.__call__`].\n        retrieval_vector_size (`int`, *optional*, defaults to 768):\n            Dimensionality of the document embeddings indexed by [`RagRetriever`].\n        retrieval_batch_size (`int`, *optional*, defaults to 8):\n            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n            [`RagRetriever`].\n        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n            using `datasets.list_datasets()`).\n        dataset_split (`str`, *optional*, defaults to `"train"`)\n            Which split of the `dataset` to load.\n        index_name (`str`, *optional*, defaults to `"compressed"`)\n            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n            `"compressed"`.\n        index_path (`str`, *optional*)\n            The path to the serialized faiss index on disk.\n        passages_path (`str`, *optional*):\n            A path to text passages compatible with the faiss index. Required if using\n            [`~models.rag.retrieval_rag.LegacyIndex`]\n        use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n            Whether to load a "dummy" variant of the dataset specified by `dataset`.\n        label_smoothing (`float`, *optional*, defaults to 0.0):\n            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n            in the loss calculation. If set to 0, no label smoothing is performed.\n        do_marginalize (`bool`, *optional*, defaults to `False`):\n            If `True`, the logits are marginalized over all documents by making use of\n            `torch.nn.functional.log_softmax`.\n        reduce_loss (`bool`, *optional*, defaults to `False`):\n            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n        do_deduplication (`bool`, *optional*, defaults to `True`):\n            Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n            set to `False` if used while training with distributed backend.\n        exclude_bos_score (`bool`, *optional*, defaults to `False`):\n            Whether or not to disregard the BOS token when computing the loss.\n        output_retrieved(`bool`, *optional*, defaults to `False`):\n            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n            `context_attention_mask` are returned. See returned tensors for more detail.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n        forced_eos_token_id (`int`, *optional*):\n            The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n            `eos_token_id`.\n'
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs)
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
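# Hedged usage sketch (added as a comment; the model ids are illustrative):
#   from transformers import AutoConfig, RagConfig
#   qe_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   gen_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_cfg = RagConfig.from_question_encoder_generator_configs(qe_cfg, gen_cfg, n_docs=5)
#   rag_cfg.generator.model_type  # -> "bart"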
| 50 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
    @unittest.skipIf(torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed')
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained('DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats() | 526 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Tuple = 'T5Config'
def _snake_case ( lowerCamelCase__ : jnp.array , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> jnp.ndarray:
lowerCamelCase_ : Optional[Any] =jnp.zeros_like(lowerCamelCase__ )
lowerCamelCase_ : Dict =shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : Union[str, Any] =shifted_input_ids.at[:, 0].set(lowerCamelCase__ )
lowerCamelCase_ : Optional[int] =jnp.where(shifted_input_ids == -100 , lowerCamelCase__ , lowerCamelCase__ )
return shifted_input_ids
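# Added worked example (values hypothetical): with pad_token_id=0 and
# decoder_start_token_id=0, labels [[5, 6, -100]] become decoder inputs
# [[0, 5, 6]]: every token shifts right one slot, the start token fills
# position 0, and the -100 loss-masking sentinel is replaced by the pad id.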
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 244 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"
    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"
    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
def UpperCAmelCase_ ( self ):
@dataclass
class lowercase__ :
'''simple docstring'''
A_ : Dict = """toto"""
_SCREAMING_SNAKE_CASE : Any = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_SCREAMING_SNAKE_CASE )
self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = parser.parse_args([] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--bar""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""help message""" )
expected.add_argument("""--baz""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
_SCREAMING_SNAKE_CASE : Any = HfArgumentParser(_SCREAMING_SNAKE_CASE )
self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args([] )
self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , bar=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
_SCREAMING_SNAKE_CASE : str = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--required_str""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_SCREAMING_SNAKE_CASE , )
self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_SCREAMING_SNAKE_CASE , )
expected.add_argument("""--opt""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
expected.add_argument("""--baz""" , default="""toto""" , type=_SCREAMING_SNAKE_CASE , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_SCREAMING_SNAKE_CASE )
self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_SCREAMING_SNAKE_CASE : str = parser.parse_dict(_SCREAMING_SNAKE_CASE )[0]
_SCREAMING_SNAKE_CASE : int = BasicExample(**_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_SCREAMING_SNAKE_CASE , parser.parse_dict , _SCREAMING_SNAKE_CASE , allow_extra_keys=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Tuple = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """temp_json""" )
os.mkdir(_SCREAMING_SNAKE_CASE )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = BasicExample(**_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[Any] = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE : str = os.path.join(_SCREAMING_SNAKE_CASE , """temp_yaml""" )
os.mkdir(_SCREAMING_SNAKE_CASE )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
_SCREAMING_SNAKE_CASE : Any = BasicExample(**_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = HfArgumentParser(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
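# Illustrative sketch (not part of the test suite): HfArgumentParser turns a
# dataclass into argparse flags. The field names/types of BasicExample are
# inferred from the dict literals used in the parse_dict tests above and are
# an assumption here.
#
#   from dataclasses import dataclass
#   from transformers import HfArgumentParser
#
#   @dataclass
#   class BasicExample:
#       foo: int
#       bar: float
#       baz: str
#       flag: bool = False
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "12", "--bar", "3.14", "--baz", "42", "--flag", "true"]
#   )
#   assert example.foo == 12 and example.flag is True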
| 533 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
'''do_convert_rgb''': True,
}
A_ : int = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : List[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Optional[int] = self.get_tokenizer()
A_ : Any = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : str = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
A_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
A_ : Dict = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : List[Any] = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Tuple = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Tuple = self.prepare_image_inputs()
A_ : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ : List[str] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Tuple = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。'''
A_ : Any = processor(text=_SCREAMING_SNAKE_CASE )
A_ : Tuple = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self )->Dict:
'''simple docstring'''
A_ : Union[str, Any] = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Dict = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = '''Alexandra,T-shirt的价格是15便士。'''
A_ : str = self.prepare_image_inputs()
A_ : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : int = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Tuple = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : List[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
A_ : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : List[str] = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : List[Any] = ChineseCLIPProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Tuple = '''Alexandra,T-shirt的价格是15便士。'''
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
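# Minimal usage sketch (fixtures assumed from the tests above): the processor
# routes text to the BERT tokenizer and images to the image processor in one
# call, producing exactly the keys asserted in the tests.
#
#   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']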
| 590 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class __SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = False ):
"""simple docstring"""
lowerCAmelCase__ = scheduler
lowerCAmelCase__ = optimizers if isinstance(_UpperCamelCase , (list, tuple) ) else [optimizers]
lowerCAmelCase__ = split_batches
lowerCAmelCase__ = step_with_optimizer
lowerCAmelCase__ = GradientState()
def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase__ = AcceleratorState().num_processes
for _ in range(_UpperCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.scheduler.get_last_lr()
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.scheduler.state_dict()
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
self.scheduler.load_state_dict(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.scheduler.get_lr()
def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.scheduler.print_lr(*_UpperCamelCase , **_UpperCamelCase )
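# Usage sketch (hypothetical torch objects; upstream accelerate calls this class
# AcceleratedScheduler): Accelerator.prepare() normally constructs this wrapper,
# and it defers scheduler.step() until the wrapped optimizer has really stepped,
# i.e. gradients were synced and no step was skipped by the grad scaler.
#
#   wrapped = __SCREAMING_SNAKE_CASE(lr_scheduler, optimizer)
#   wrapped.step()          # no-op beyond _step_count bookkeeping mid-accumulation
#   wrapped.get_last_lr()   # proxies to the underlying torch scheduler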
| 365 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : int = logging.get_logger("""transformers.models.encodec""")
__snake_case : Tuple = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case : List[Any] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case : str = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case : str = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case : Any = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Union[str, Any] = []
__snake_case : Tuple = []
def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "running_mean":
lowerCAmelCase__ = value
elif weight_type == "running_var":
lowerCAmelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase__ = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase__ = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase__ = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase__ = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase__ = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase__ = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase__ = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def _UpperCamelCase ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase__ , lowerCAmelCase__ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
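# Matching sketch for the helper above: a key ending in ".*" is a prefix
# wildcard, e.g. should_ignore("decoder.model.1.lstm", ["decoder.model.*"]) is
# True, while a key containing ".*." splits into a prefix/suffix pair and
# matches any name that contains both pieces.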
def _UpperCamelCase ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase__ = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase__ = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(UpperCamelCase_ , UpperCamelCase_ ):
logger.info(F"{name} was ignored" )
continue
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase__ , lowerCAmelCase__ = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase__ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(UpperCamelCase_ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , UpperCamelCase_ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase__ = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase__ = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase__ = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase__ = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase__ = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase__ = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase__ = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase__ = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
lowerCAmelCase__ = 'weight'
elif "running_mean" in name:
lowerCAmelCase__ = 'running_mean'
elif "running_var" in name:
lowerCAmelCase__ = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase__ = 'num_batches_tracked'
else:
lowerCAmelCase__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ = EncodecConfig.from_pretrained(UpperCamelCase_ )
else:
lowerCAmelCase__ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase__ = [8, 5, 4, 4]
lowerCAmelCase__ = [2.2]
lowerCAmelCase__ = 64
lowerCAmelCase__ = 3_2000
lowerCAmelCase__ = 2048
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
elif model_name == "encodec_48khz":
lowerCAmelCase__ = [8, 5, 4, 2]
lowerCAmelCase__ = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase__ = 4_8000
lowerCAmelCase__ = 2
lowerCAmelCase__ = False
lowerCAmelCase__ = 'time_group_norm'
lowerCAmelCase__ = True
lowerCAmelCase__ = 1.0
lowerCAmelCase__ = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}" )
lowerCAmelCase__ = EncodecModel(UpperCamelCase_ )
lowerCAmelCase__ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = torch.load(UpperCamelCase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase__ = original_checkpoint['best_state']
recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
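# Example invocation (script name and local paths are placeholders):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf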
| 365 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=3_0 , SCREAMING_SNAKE_CASE_ : Dict=4_0_0 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : int=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , ) -> Union[str, Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase_ = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = num_channels
lowercase_ = min_resolution
lowercase_ = max_resolution
lowercase_ = do_resize
lowercase_ = size
lowercase_ = do_normalize
lowercase_ = image_mean
lowercase_ = image_std
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
def _lowercase ( self : Optional[int] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict=False ) -> List[str]:
if not batched:
lowercase_ = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ):
lowercase_ , lowercase_ = image.size
else:
lowercase_ , lowercase_ = image.shape[1], image.shape[2]
if w < h:
lowercase_ = int(self.size['''shortest_edge'''] * h / w )
lowercase_ = self.size['''shortest_edge''']
elif w > h:
lowercase_ = self.size['''shortest_edge''']
lowercase_ = int(self.size['''shortest_edge'''] * w / h )
else:
lowercase_ = self.size['''shortest_edge''']
lowercase_ = self.size['''shortest_edge''']
else:
lowercase_ = []
for image in image_inputs:
lowercase_ , lowercase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase_ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[0] )[0]
lowercase_ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Optional[int] = YolosImageProcessor if is_vision_available() else None
def _lowercase ( self : List[Any] ) -> Dict:
lowercase_ = YolosImageProcessingTester(self )
@property
def _lowercase ( self : List[str] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def _lowercase ( self : str ) -> Any:
lowercase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> Tuple:
pass
def _lowercase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : List[Any] ) -> Tuple:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : List[Any] ) -> str:
# Initialize image_processing
lowercase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
lowercase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Optional[Any] ) -> Tuple:
# Initialize image_processings
lowercase_ = self.image_processing_class(**self.image_processor_dict )
lowercase_ = self.image_processing_class(do_resize=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ , do_rescale=SCREAMING_SNAKE_CASE_ )
# create random PyTorch tensors
lowercase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ = image_processing_a.pad(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
lowercase_ = image_processing_a(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def _lowercase ( self : List[Any] ) -> Dict:
# prepare image and target
lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ = json.loads(f.read() )
lowercase_ = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowercase_ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
# verify pixel values
lowercase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
# verify area
lowercase_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE_ ) )
# verify boxes
lowercase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# verify image_id
lowercase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE_ ) )
# verify is_crowd
lowercase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE_ ) )
# verify class_labels
lowercase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE_ ) )
# verify orig_size
lowercase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE_ ) )
# verify size
lowercase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE_ ) )
@slow
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
# prepare image, target and masks_path
lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase_ = json.loads(f.read() )
lowercase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowercase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase_ = YolosImageProcessor(format='''coco_panoptic''' )
lowercase_ = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
# verify pixel values
lowercase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
# verify area
lowercase_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE_ ) )
# verify boxes
lowercase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# verify image_id
lowercase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE_ ) )
# verify is_crowd
lowercase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE_ ) )
# verify class_labels
lowercase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE_ ) )
# verify masks
lowercase_ = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , SCREAMING_SNAKE_CASE_ )
# verify orig_size
lowercase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE_ ) )
# verify size
lowercase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE_ ) )
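# Illustrative sketch (toy image): resizing preserves aspect ratio, targeting
# size["shortest_edge"] and capping at size["longest_edge"], as exercised above.
#
#   from PIL import Image
#   from transformers import YolosImageProcessor
#
#   processor = YolosImageProcessor(size={"shortest_edge": 18, "longest_edge": 1333})
#   pixel_values = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
#   pixel_values.shape  # torch.Size([1, 3, 18, 24]); 480 -> 18 scales 640 -> 24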
| 97 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_ )
| 670 | 0 |
from __future__ import annotations
from math import pow, sqrt
def UpperCamelCase (lowercase_: float , lowercase_: float , lowercase_: float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
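# Worked example (function name mangled above; electrical_impedance upstream):
# with R = 3 Ω and X = 4 Ω, |Z| = sqrt(3**2 + 4**2) = 5 Ω, so calling it as
# (resistance=3, reactance=4, impedance=0) yields {'impedance': 5.0}.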
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """deta"""
lowercase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = copy.deepcopy(self.__dict__)
_lowercase : Optional[int] = self.backbone_config.to_dict()
_lowercase : Optional[Any] = self.__class__.model_type
return output
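# Instantiation sketch (class name mangled above; DetaConfig upstream): with
# backbone_config=None, __init__ falls back to a ResNet backbone over stages
# 2-4, and note that two_stage=True requires with_box_refine=True.
#
#   config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#   config.num_attention_heads  # mapped to encoder_attention_heads via the attribute map above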
| 89 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=[1, 1, 2] , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Any = seq_length
SCREAMING_SNAKE_CASE_ : Optional[int] = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = block_sizes
SCREAMING_SNAKE_CASE_ : Tuple = num_decoder_layers
SCREAMING_SNAKE_CASE_ : Any = d_model
SCREAMING_SNAKE_CASE_ : List[Any] = n_head
SCREAMING_SNAKE_CASE_ : int = d_head
SCREAMING_SNAKE_CASE_ : List[Any] = d_inner
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE_ : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : List[str] = num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE_ : List[str] = scope
SCREAMING_SNAKE_CASE_ : Any = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE_ : Any = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE_ : Any = self.num_hidden_layers + 2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ : Dict = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelBaseModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForPreTraining(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForMaskedLM(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        """simple docstring"""
        model = TFFunnelForQuestionAnswering(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _A ( __magic_name__ , __magic_name__ , unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class _A ( __magic_name__ , unittest.TestCase):
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFFunnelModelTester(self , base=True )
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_base_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 709 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig :
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        """simple docstring"""
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init( self ):
        """simple docstring"""
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable( self ):
        """simple docstring"""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method( self ):
        """simple docstring"""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        """simple docstring"""
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file( self , json_file_path ):
        """simple docstring"""
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
def __repr__( self ):
"""simple docstring"""
return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string( self , use_diff = True ):
        """simple docstring"""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict( self ):
        """simple docstring"""
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
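# Illustrative round trip (a minimal sketch, assuming torch and a recent
# bitsandbytes release are installed; the file name is made up for the example):
#
#   config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   assert config.quantization_method() == "nf4"
#   config.to_json_file("quantization_config.json")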
| 353 | 0 |
'''simple docstring'''
def solution( n: int = 200_0000 ) -> int:
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
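    # Illustrative check of the sieve above: the primes below 10 are 2, 3, 5 and 7.
    assert solution(10 ) == 17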
| 270 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig ( PretrainedConfig):
    """simple docstring"""
    model_type = 'roberta-prelayernorm'
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig ( OnnxConfig):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 275 | 0 |
import cv2 as cva
import numpy as np
class HarrisCorner:
    def __init__( self , k: float , window_size: int ) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )
    def __str__( self ) -> str:
        return str(self.k )
    def detect( self , img_path: str ) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_55 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.0_4, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
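# For comparison (illustrative, not part of the original script): OpenCV ships a
# vectorized implementation of the same detector, which could be called as
#
#   gray = cva.imread("path_to_image", 0)
#   response = cva.cornerHarris(np.float32(gray), 3, 3, 0.04)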
| 718 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits=BITS ):
    """Converts an image tensor with values in [0, 1] to a bit tensor with values in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , 'b c d h w -> b (c d) h w' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits=BITS ):
    """Converts a bit tensor with values in {-1, 1} back to an image tensor with values in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
    return (dec / 255).clamp(0.0 , 1.0 )
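# Round-trip sanity check (illustrative): decoding the encoded bits recovers the
# 8-bit quantized image, e.g.
#
#   x = torch.rand(1, 3, 4, 4)
#   assert torch.equal(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255)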
def ddim_bit_scheduler_step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    """simple docstring"""
    if self.num_inference_steps is None:
        raise ValueError(
            'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else 'cpu'
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step( self , model_output , timestep , sample , prediction_type="epsilon" , generator=None , return_dict = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    """simple docstring"""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height: Optional[int] = 2_56 , width: Optional[int] = 2_56 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
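# Minimal usage sketch (illustrative; "my-bit-diffusion" is a made-up checkpoint
# name, and a UNet trained on bit representations is assumed):
#
#   unet = UNet2DConditionModel.from_pretrained("my-bit-diffusion", subfolder="unet")
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]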
| 372 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
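    # With the sample scores above, the game tree has height log2(8) = 3 and the
    # printed optimal value is 65.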
| 294 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence: list ) -> list:
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg )
    return sequence
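# Example: dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) returns [0, 0, 1, 1, 2, 2].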
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 294 | 1 |
def UpperCAmelCase ( number ):
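    """Return True if ``number`` is even, by checking only its lowest bit.

    >>> UpperCAmelCase(10)
    True
    >>> UpperCAmelCase(7)
    False
    """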
return number & 1 == 0
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 17 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
    def dummy_input( self ):
return self.get_dummy_input()
@property
    def output_shape( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 1_28
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 1_28,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels" )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )
    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
    def test_training( self ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 17 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
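# Worked example (illustrative): for model_name = "xclip-base-patch32" the two
# characters after "patch" are "32", so patch_size becomes 32; "16-frames"
# checkpoints are additionally converted with num_frames = 16.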
def rename_key( name: str ) -> str:
'''simple docstring'''
# text encoder
if name == "token_embedding.weight":
SCREAMING_SNAKE_CASE : str = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
SCREAMING_SNAKE_CASE : int = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
SCREAMING_SNAKE_CASE : int = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
SCREAMING_SNAKE_CASE : int = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
SCREAMING_SNAKE_CASE : str = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
SCREAMING_SNAKE_CASE : int = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
SCREAMING_SNAKE_CASE : int = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
SCREAMING_SNAKE_CASE : str = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
SCREAMING_SNAKE_CASE : str = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
SCREAMING_SNAKE_CASE : Tuple = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = orig_state_dict.pop(__A )
if "attn.in_proj" in key:
SCREAMING_SNAKE_CASE : Optional[Any] = key.split('.' )
if key.startswith('visual' ):
SCREAMING_SNAKE_CASE : Any = key_split[3]
SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
SCREAMING_SNAKE_CASE : str = val[
:dim, :
]
SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = val[
:dim
]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = val[
-dim:
]
else:
if "weight" in key:
SCREAMING_SNAKE_CASE : Any = val[
:dim, :
]
SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : str = val[
-dim:, :
]
else:
SCREAMING_SNAKE_CASE : str = val[:dim]
SCREAMING_SNAKE_CASE : Dict = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : Any = val[-dim:]
elif key.startswith('mit' ):
SCREAMING_SNAKE_CASE : int = key_split[2]
SCREAMING_SNAKE_CASE : str = config.vision_config.mit_hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE : str = val[:dim, :]
SCREAMING_SNAKE_CASE : Dict = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : Dict = val[:dim]
SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Any = val[-dim:]
else:
SCREAMING_SNAKE_CASE : Optional[int] = key_split[2]
SCREAMING_SNAKE_CASE : Optional[int] = config.text_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = val[:dim, :]
SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim]
SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = rename_key(__A )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
SCREAMING_SNAKE_CASE : List[Any] = val.T
SCREAMING_SNAKE_CASE : Optional[Any] = val
return orig_state_dict
def prepare_video( num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        filename = 'eating_spaghetti.npy'
    elif num_frames == 32:
        filename = 'eating_spaghetti_32_frames.npy'
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename=filename , repo_type='dataset' , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[Any] = 8
if "16-frames" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 16
elif "shot" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Tuple = get_xclip_config(__A , __A )
SCREAMING_SNAKE_CASE : Optional[Any] = XCLIPModel(__A )
model.eval()
if "drive" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 'pytorch_model.bin'
gdown.cached_download(__A , __A , quiet=__A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(__A , map_location='cpu' )['model']
else:
SCREAMING_SNAKE_CASE : Any = torch.hub.load_state_dict_from_url(__A )['model']
SCREAMING_SNAKE_CASE : Tuple = convert_state_dict(__A , __A )
SCREAMING_SNAKE_CASE : List[str] = XCLIPModel(__A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = model.load_state_dict(__A , strict=__A )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
SCREAMING_SNAKE_CASE : int = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
SCREAMING_SNAKE_CASE : Optional[int] = VideoMAEImageProcessor(size=__A )
SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
SCREAMING_SNAKE_CASE : str = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
SCREAMING_SNAKE_CASE : List[str] = XCLIPProcessor(image_processor=__A , tokenizer=__A )
SCREAMING_SNAKE_CASE : List[Any] = prepare_video(__A )
SCREAMING_SNAKE_CASE : int = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=__A , return_tensors='pt' , padding=__A )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**__A )
# Verify outputs
SCREAMING_SNAKE_CASE : Tuple = outputs.logits_per_video
SCREAMING_SNAKE_CASE : str = logits_per_video.softmax(dim=1 )
print('Probs:' , __A )
# kinetics-400
if model_name == "xclip-base-patch32":
SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
SCREAMING_SNAKE_CASE : int = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
SCREAMING_SNAKE_CASE : Any = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
SCREAMING_SNAKE_CASE : Any = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
SCREAMING_SNAKE_CASE : str = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
SCREAMING_SNAKE_CASE : Any = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(__A , __A , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(__A , organization='nielsr' )
processor.push_to_hub(__A , organization='nielsr' )
slow_tokenizer.push_to_hub(__A , organization='nielsr' )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A_ : List[str] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 265 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ : int = get_tests_dir('fixtures')
A_ : int = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
A_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> Tuple:
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ).to_dict()
            config_dict.pop('feature_extractor_type' )
            config = Wav2Vec2FeatureExtractor(**config_dict )
# save in new folder
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , revision='aaaaaa' )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = True
try:
AutoConfig.register('custom' , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_SCREAMING_SNAKE_CASE , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 265 | 1 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number ):
    """Return the sum of the fifth powers of the digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution( ):
    """Sum all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
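# The search above yields the known Project Euler problem 30 result, 443839.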
if __name__ == "__main__":
    print(solution())
| 522 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__a : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''pixel_values''']
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = 8 , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_pad
__lowercase = pad_size
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase = get_image_size(lowerCAmelCase__ )
__lowercase = (old_height // size + 1) * size - old_height
__lowercase = (old_width // size + 1) * size - old_width
return pad(lowerCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_pad if do_pad is not None else self.do_pad
__lowercase = pad_size if pad_size is not None else self.pad_size
__lowercase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_pad:
__lowercase = [self.pad(lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
        return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
 | 522 | 1 |
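# Illustrative demo (separate from the processor above) of its pad-to-multiple
# rule: each side grows to the next multiple of `size` via
# (old // size + 1) * size - old, so an exact multiple still gains a full step.
import numpy as np

old_height, old_width, size = 250, 300, 8
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
padded = np.pad(np.zeros((old_height, old_width)), ((0, pad_height), (0, pad_width)), mode="symmetric")
assert padded.shape == (256, 304)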
'''simple docstring'''
test_graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
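    # Illustrative check (not in the original): with BFS (Edmonds-Karp) augmenting
    # order this network's minimum cut is [(1, 3), (4, 3), (4, 5)], whose original
    # capacities sum to the max-flow value of 23.
    cut = mincut([row[:] for row in test_graph], source=0, sink=5)
    assert sum(test_graph[u][v] for u, v in cut) == 23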
| 649 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08_497,
            "b": 0.01_492,
            "c": 0.02_202,
            "d": 0.04_253,
            "e": 0.11_162,
            "f": 0.02_228,
            "g": 0.02_015,
            "h": 0.06_094,
            "i": 0.07_546,
            "j": 0.00_153,
            "k": 0.01_292,
            "l": 0.04_025,
            "m": 0.02_406,
            "n": 0.06_749,
            "o": 0.07_507,
            "p": 0.01_929,
            "q": 0.00_095,
            "r": 0.07_587,
            "s": 0.06_327,
            "t": 0.09_356,
            "u": 0.02_758,
            "v": 0.00_978,
            "w": 0.02_560,
            "x": 0.00_150,
            "y": 0.01_994,
            "z": 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
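if __name__ == "__main__":
    # Usage sketch (illustrative, mirroring this algorithm's well-known example):
    # "crybd cdbsxq" is "short string" Caesar-shifted by 10.
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(shift, decoded)  # -> 10 short string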
| 649 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["MobileNetV2FeatureExtractor"]
lowerCAmelCase__ = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
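# A minimal stand-in (an illustrative toy, not transformers' real _LazyModule)
# for the deferred-import idea above, using PEP 562 module-level __getattr__:
# nothing is imported until an attribute is first accessed.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")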
| 594 | from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain stored as a mapping node -> {destination: probability}."""
    def __init__(self):
        self.connections = {}
    def add_node(self, node: str):
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self):
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    import doctest

    doctest.testmod()
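    # Usage sketch (illustrative): a two-state chain in which both states move
    # to "b" with high probability, so "b" should dominate the visit counts.
    chain = [("a", "a", 0.2), ("a", "b", 0.8), ("b", "a", 0.3), ("b", "b", 0.7)]
    print(get_transitions("a", chain, 1000))  # e.g. Counter({'b': ..., 'a': ...})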
| 594 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_a = tempfile.mkdtemp()
_a = BlipImageProcessor()
_a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
_a = BlipaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def __lowerCAmelCase ( self : Tuple , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_a = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = self.prepare_image_inputs()
_a = image_processor(lowerCAmelCase_ , return_tensors='''np''' )
_a = processor(images=lowerCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = processor(text=lowerCAmelCase_ )
_a = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(lowerCAmelCase_ )
_a = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = BlipaProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
_a = '''lower newer'''
_a = self.prepare_image_inputs()
_a = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 22 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
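    # Illustrative follow-up (not in the original): 6 and 28 are perfect numbers,
    # i.e. equal to the sum of their proper divisors.
    assert sum_of_divisors(6) == 6 and sum_of_divisors(28) == 28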
| 510 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCAmelCase_ : Dict =name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase_ : Optional[int] =name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase_ : Union[str, Any] =name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCAmelCase_ : List[str] =name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCAmelCase_ : Dict =name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase_ : Optional[Any] =name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase_ : Tuple =name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase_ : Optional[int] =name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCAmelCase_ : Tuple =name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase_ : Optional[int] =name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCAmelCase_ : Dict =name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase_ : int =name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase_ : Any =name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase_ : List[Any] =name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCAmelCase_ : Any =name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
lowerCAmelCase_ : Tuple =name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
lowerCAmelCase_ : Union[str, Any] =name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
lowerCAmelCase_ : Optional[int] =name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
lowerCAmelCase_ : Tuple =name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
lowerCAmelCase_ : Union[str, Any] =name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase_ : Union[str, Any] =name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCAmelCase_ : List[Any] =name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCAmelCase_ : Tuple =name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
lowerCAmelCase_ : List[Any] =name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : str =orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase_ : Dict =key.split('''.''' )
lowerCAmelCase_ : Any =int(key_split[2] ), int(key_split[4] )
lowerCAmelCase_ : Optional[Any] =config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase_ : Tuple =val[:dim, :]
lowerCAmelCase_ : Optional[int] =val[dim : dim * 2, :]
lowerCAmelCase_ : Union[str, Any] =val[-dim:, :]
else:
lowerCAmelCase_ : str =val[:dim]
lowerCAmelCase_ : Any =val[dim : dim * 2]
lowerCAmelCase_ : Optional[int] =val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase_ : Dict =key.split('''.''' )
lowerCAmelCase_ : Optional[Any] =int(key_split[3] )
lowerCAmelCase_ : int =config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ : int =val[:dim, :]
lowerCAmelCase_ : Union[str, Any] =val[
dim : dim * 2, :
]
lowerCAmelCase_ : int =val[-dim:, :]
else:
lowerCAmelCase_ : Any =val[:dim]
lowerCAmelCase_ : Dict =val[dim : dim * 2]
lowerCAmelCase_ : Any =val[-dim:]
else:
lowerCAmelCase_ : Union[str, Any] =rename_key(_SCREAMING_SNAKE_CASE )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase_ : Optional[int] =val.squeeze_()
else:
lowerCAmelCase_ : Optional[int] =val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( ):
lowerCAmelCase_ : Any ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ : List[Any] =Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="groupvit-gcc-yfcc" , _SCREAMING_SNAKE_CASE=False ):
lowerCAmelCase_ : Union[str, Any] =GroupViTConfig()
lowerCAmelCase_ : str =GroupViTModel(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase_ : Any =torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
lowerCAmelCase_ : int =convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[int] =model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_SCREAMING_SNAKE_CASE ) == 0)
# verify result
lowerCAmelCase_ : str =CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCAmelCase_ : Optional[Any] =prepare_img()
lowerCAmelCase_ : Tuple =processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
with torch.no_grad():
lowerCAmelCase_ : int =model(**_SCREAMING_SNAKE_CASE )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase_ : Dict =torch.tensor([[13.35_23, 6.36_29]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase_ : Optional[int] =torch.tensor([[16.18_73, 8.62_30]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3 )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print('''Successfully saved processor and model to''' , _SCREAMING_SNAKE_CASE )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='''nielsr''' )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gccy-fcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 708 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler
    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
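# Hedged usage sketch: this class mirrors diffusers' ScoreSdeVePipeline; the
# checkpoint below is a real Hub repo, but the download is left commented out.
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-1024")
# image = pipe(num_inference_steps=2000).images[0]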
| 305 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
'''simple docstring'''
_UpperCAmelCase : Any = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_UpperCAmelCase : int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_UpperCAmelCase : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 717 |
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')
    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)
    print(f'\n{mode.title()}ed message:')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')
def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)
if __name__ == "__main__":
main()
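    # Round-trip check (illustrative): decryption inverts encryption for any key.
    assert decrypt_message('LEMON', encrypt_message('LEMON', 'Attack at dawn!')) == 'Attack at dawn!'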
| 145 | 0 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ):
def __init__( self :str , _lowerCamelCase :Optional[Any]=None , **_lowerCamelCase :Tuple ):
'''simple docstring'''
super().__init__(features=_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase ) and column:
if all(
isinstance(_lowerCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(_lowerCamelCase )
return column
def lowerCamelCase_ ( self :Any , _lowerCamelCase :List[Any] ):
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase , (str, bytes, type(_lowerCamelCase )) ):
return value
elif isinstance(_lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase_ : str ={}
if isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
UpperCamelCase_ : Any ={'dtype': torch.intaa}
elif isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase_ : int ={'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_lowerCamelCase , PIL.Image.Image ):
UpperCamelCase_ : str =np.asarray(_lowerCamelCase )
return torch.tensor(_lowerCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} )
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :Union[str, Any] ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(_lowerCamelCase , '__array__' ) and not isinstance(_lowerCamelCase , torch.Tensor ):
UpperCamelCase_ : Optional[int] =data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_lowerCamelCase , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
elif isinstance(_lowerCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
return self._tensorize(_lowerCamelCase )
def lowerCamelCase_ ( self :Any , _lowerCamelCase :dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , _lowerCamelCase , map_list=_lowerCamelCase )
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :pa.Table ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self.numpy_arrow_extractor().extract_row(_lowerCamelCase )
UpperCamelCase_ : Dict =self.python_features_decoder.decode_row(_lowerCamelCase )
return self.recursive_tensorize(_lowerCamelCase )
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :pa.Table ):
'''simple docstring'''
UpperCamelCase_ : List[str] =self.numpy_arrow_extractor().extract_column(_lowerCamelCase )
UpperCamelCase_ : Any =self.python_features_decoder.decode_column(_lowerCamelCase , pa_table.column_names[0] )
UpperCamelCase_ : Union[str, Any] =self.recursive_tensorize(_lowerCamelCase )
UpperCamelCase_ : Tuple =self._consolidate(_lowerCamelCase )
return column
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :pa.Table ):
'''simple docstring'''
UpperCamelCase_ : List[str] =self.numpy_arrow_extractor().extract_batch(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self.python_features_decoder.decode_batch(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =self.recursive_tensorize(_lowerCamelCase )
for column_name in batch:
UpperCamelCase_ : int =self._consolidate(batch[column_name] )
return batch
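# Usage sketch (hedged): this formatter is what `datasets` dispatches to when a
# dataset is put in torch format; the public entry point is shown commented out.
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
# ds[0]["x"]  # -> tensor([1., 2.])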
| 357 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
__SCREAMING_SNAKE_CASE = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
__SCREAMING_SNAKE_CASE = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class a__ ( A__ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = RoFormerTokenizer
def __init__( self :Tuple , _lowerCamelCase :Dict=None , _lowerCamelCase :Dict=None , _lowerCamelCase :List[Any]=True , _lowerCamelCase :Dict="[UNK]" , _lowerCamelCase :List[str]="[SEP]" , _lowerCamelCase :str="[PAD]" , _lowerCamelCase :Optional[Any]="[CLS]" , _lowerCamelCase :Optional[int]="[MASK]" , _lowerCamelCase :str=True , _lowerCamelCase :Tuple=None , **_lowerCamelCase :Any , ):
'''simple docstring'''
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
UpperCamelCase_ : Any =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , _lowerCamelCase ) != do_lower_case
or pre_tok_state.get('strip_accents' , _lowerCamelCase ) != strip_accents
):
UpperCamelCase_ : Optional[Any] =getattr(_lowerCamelCase , pre_tok_state.pop('type' ) )
UpperCamelCase_ : Tuple =do_lower_case
UpperCamelCase_ : Union[str, Any] =strip_accents
UpperCamelCase_ : Union[str, Any] =pre_tok_class(**_lowerCamelCase )
UpperCamelCase_ : List[str] =do_lower_case
def __getstate__( self :Any ):
'''simple docstring'''
UpperCamelCase_ : str =self.__dict__.copy()
UpperCamelCase_ : Union[str, Any] =BertPreTokenizer()
return state
def __setstate__( self :str , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : int =d
UpperCamelCase_ : Optional[int] =self.__dict__['_tokenizer'].get_vocab()
UpperCamelCase_ : Any =PreTokenizer.custom(JiebaPreTokenizer(_lowerCamelCase ) )
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Any=None ):
'''simple docstring'''
UpperCamelCase_ : int =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =[self.sep_token_id]
UpperCamelCase_ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def lowerCamelCase_ ( self :str , _lowerCamelCase :List[str] , _lowerCamelCase :List[Any]=None , _lowerCamelCase :Optional[Any]=None , _lowerCamelCase :int=False , **_lowerCamelCase :Optional[int] , ):
'''simple docstring'''
UpperCamelCase_ : str =BertPreTokenizer()
return super().save_pretrained(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
| 357 | 1 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the discrete Fourier transform."""
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
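    # Cross-check (illustrative): (1 + 2x + 3x^2)(4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
    product = FFT([1, 2, 3], [4, 5]).product
    assert [round(c.real) for c in product] == [4, 13, 22, 15]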
| 719 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=6_0_0, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 2_5)
    q3 = np.percentile(train_user, 7_5)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data against the held-out last observation
    not_str = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 407 | 0 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
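    # Illustrative check (not in the original): capturing stdout confirms the
    # greedy rule selects activities 0, 1, 3 and 4 for these arrays.
    import io
    from contextlib import redirect_stdout
    buffer = io.StringIO()
    with redirect_stdout(buffer):
        print_max_activities(start, finish)
    assert "0,1,3,4," in buffer.getvalue()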
| 435 | '''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the path from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # each search aims at the other search's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
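# Both searches are O(rows * cols) in the worst case; the bidirectional variant
# usually expands fewer nodes because its two frontiers meet near the middle of
# the path instead of one frontier sweeping the whole grid.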
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 435 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 if both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
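# Truth table of the XNOR gate (the complement of XOR):
# | Input 1 | Input 2 | Output |
# |    0    |    0    |   1    |
# |    0    |    1    |   0    |
# |    1    |    0    |   0    |
# |    1    |    1    |   1    |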
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 119 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfers the weights of `self.src` to `self.dest` by running a forward pass with `x` as input."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda t: type(t) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda t: type(t) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
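# Illustrative usage of the two helpers above (the model names here are
# examples only, not taken from this script):
#   src = timm.create_model("regnetx_002", pretrained=True)
#   dest = RegNetModel(config)
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 224, 224))
# Tracker records the learnable leaf modules in forward-execution order, and
# ModuleTransfer copies their state dicts pairwise between the two traces.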
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a vissl RegNet trunk so its forward pass yields the per-stage feature maps."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Maps a model name to a function returning the source model (defaults to timm)."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps a model name to our (transformers) model class."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 119 | 1 |
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively explores the right, diagonal and down neighbours (no memoization)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, but memoized in `dp_array` so every cell is solved at most once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP, but only two rows of the table are kept in memory."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy the finished row; aliasing would let this row's new values
        # shadow the previous row's values on the next iteration
        next_row = current_row[:]
    return largest_square_area
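# Complexity of the four variants above for an n x m matrix:
#   plain recursion:      O(3^(n*m)) time, O(n + m) recursion depth
#   memoized recursion:   O(n*m) time, O(n*m) space
#   bottom-up table:      O(n*m) time, O(n*m) space
#   two-row optimization: O(n*m) time, O(m) space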
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 590 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : List[str] = num_heads
SCREAMING_SNAKE_CASE__ : str = window_size
SCREAMING_SNAKE_CASE__ : Any = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = patch_norm
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride
SCREAMING_SNAKE_CASE__ : List[Any] = out_features
SCREAMING_SNAKE_CASE__ : Dict = out_indices
def A_ ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"]
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def A_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self) -> None:
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = 0
return t
def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self) -> None:
        self.model_tester = MaskFormerSwinModelTester(self)
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 663 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
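# Typical invocation (script and path names below are placeholders):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_checkpoint.pth \
#       --pytorch_dump_folder_path ./dumped_model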
if __name__ == "__main__":
UpperCamelCase__: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__: str = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 528 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: Tuple = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
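# Worked example of the width computation above: with the defaults
# (input_size=1, 7 lags, no static/dynamic/time features, cardinality=[0] so
# sum(embedding_dimension) = 0), feature_size = 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9.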
| 528 | 1 |
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 only when both inputs are 0 (the complement of OR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 308 |
import re
from filelock import FileLock
try:
import nltk
UpperCamelCase = True
except (ImportError, ModuleNotFoundError):
UpperCamelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 269 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
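# Each round-robin pass demotes whatever is still unfinished to the next
# (lower-priority) queue; the last queue runs FCFS, so every process that
# arrives is guaranteed to complete.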
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
)
| 434 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Prints a maximum number of activities that can be performed by a single person, one at a time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 434 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # number of samples in a single audio chunk, if chunked inference is enabled
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # stride (in samples) between consecutive chunks
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    # number of audio frames produced per second of audio
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
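# Minimal usage sketch for the derived properties above (values follow from the
# defaults: sampling_rate=24_000, upsampling_ratios=[8, 5, 4, 2]):
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length    # 24_000 samples (one second of audio)
#   config.chunk_stride    # 23_760 samples (1% overlap between chunks)
#   config.frame_rate      # 75 frames/s, since prod([8, 5, 4, 2]) == 320
#   config.num_quantizers  # 32, from the top bandwidth of 24.0 kbps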
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing ([])" when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
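# Illustration of the per-word label alignment above (hypothetical wordpieces):
# the word "jacksonville" might tokenize to ["jack", "##son", "##ville"]; only
# "jack" receives the word's real label id, while "##son" and "##ville" get
# pad_token_label_id (-100) so they are ignored by the loss.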
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    (
                        {"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
                        tf.int64,
                    ),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
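# Hypothetical usage sketch (NER below stands in for a concrete subclass that
# implements read_examples_from_file/get_labels; the names are illustrative):
#   task = NER()
#   labels = task.get_labels("labels.txt")
#   train_dataset = TokenClassificationDataset(
#       task, data_dir="./data", tokenizer=tokenizer, labels=labels,
#       model_type="bert", max_seq_length=128, mode=Split.train,
#   )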
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of the dependency versions, using the exact same syntax used by pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
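# Example calls (standard pip requirement syntax; these mirror how this helper
# is typically used inside transformers):
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
#   require_version("python>=3.8")
#   require_version("numpy", hint="Try: pip install numpy")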
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)


"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Find the least cuboid size M such that the number of cuboids up to
    M x M x M whose shortest surface path has integer length exceeds `limit`
    (Project Euler problem 86)."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
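# Why the inner count works: for a cuboid a x b x c with a = max_cuboid_size and
# b + c = sum_shortest_sides, the shortest surface path has length
# sqrt(a**2 + (b + c)**2), so every split of sum_shortest_sides into b >= c with
# b <= a yields one integer-path cuboid; the min/max expression counts exactly
# those splits.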
if __name__ == "__main__":
print(f'{solution() = }')
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
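# Example: for a requested 768 x 768 image with the default scale_factor of 8,
# this returns (96, 96) -- the spatial size of the latents, i.e. 768 / 8 -- and
# rounds up when the requested size is not divisible by scale_factor**2.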
class KandinskyV22Pipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Kandinsky 2.2 (decoder stage)."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
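# Greedy longest-match-first illustration: with a vocab containing {"un",
# "believable"}, tokenize("unbelievable") tries the whole string, shrinks the
# window until "un" matches, emits it, then restarts at index 2 and matches
# "believable"; a character that starts no vocab entry is emitted as unk_token.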
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
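# For instance, str_eval("123") == 6. The sliding window above only evaluates
# the 13-digit product when the incoming digit is smaller than the outgoing
# one, which is what lets it jump ahead by 13 after each evaluation.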
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
"""simple docstring"""
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal
    representation as a string, e.g. 245 -> '0xf5'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
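# A couple of worked conversions (matching Python's built-in hex()):
#   decimal_to_hexadecimal(245)  -> "0xf5"   since 245 == 15 * 16 + 5
#   decimal_to_hexadecimal(-256) -> "-0x100" since 256 == 1 * 16**2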
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
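# e.g. _flat_idx_to_idx(5, (2, 3)) == (1, 2): index 5 in a flattened 2x3 grid
# is row 1, column 2.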
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    """
    Produces an ordered sequence of tensor slices that, when used in sequence on a
    tensor with shape dims, yields tensors that contain exactly the region between
    the start and end indices (inclusive).
    """

    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper():
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower():
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Applies `layer` to slices of the flattened batch dimensions of `inputs`
    in order to bound peak memory usage.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
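

# --- Usage sketch (added, not part of the original module) ---
# A toy layer applied in chunks of 4 over the flattened (8, 16) batch dims;
# the lambda "layer" is a stand-in for a real module's forward.
def _chunk_layer_demo() -> None:
    example_inputs = {"x": torch.randn(8, 16, 32)}
    result = chunk_layer(lambda x: {"out": x * 2}, example_inputs, chunk_size=4, no_batch_dims=2)
    assert result["out"].shape == (8, 16, 32)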
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search over the power-of-two candidates for the largest
        # chunk size that does not raise (e.g. an out-of-memory RuntimeError).
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(ac1) == type(ac2)
            if isinstance(ac1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(ac1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # Compare against the cached args: if their shapes/values changed, we must re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing cached yet, so we always tune on the first call
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
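

# --- Usage sketch (added commentary; names as defined above) ---
# tuner = ChunkSizeTuner(max_chunk_size=512)
# chunk_size = tuner.tune_chunk_size(representative_fn, args=(x,), min_chunk_size=1)
# The result is cached and only re-tuned when the argument shapes change.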
"""simple docstring"""
import baseaa
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->bytes:
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : bytes ) ->str:
'''simple docstring'''
return baseaa.aaadecode(_lowercase ).decode("utf-8" )
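

# --- Usage example (added, not part of the original module): round-trip check ---
_EXAMPLE = "Hello, world!"
assert base85_decode(base85_encode(_EXAMPLE)) == _EXAMPLE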
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : int = 5000_0000 ):
A__ = set()
A__ = int((limit - 24) ** (1 / 2) )
A__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , UpperCamelCase ) ) )
for primea in primes:
A__ = primea * primea
for primea in primes:
A__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
A__ = primea * primea * primea * primea
A__ = square + cube + tetr
if total >= limit:
break
ret.add(UpperCamelCase )
return len(UpperCamelCase )
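

# --- Worked example (added): the four such numbers below fifty ---
# 28 = 2**2 + 2**3 + 2**4, 33 = 3**2 + 2**3 + 2**4,
# 47 = 2**2 + 3**3 + 2**4, 49 = 5**2 + 2**3 + 2**4,
# so solution(50) == 4.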
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, masking out padding positions
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
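

# --- Illustration (added, not part of the original module): masked mean pooling ---
# The forward pass averages token embeddings while ignoring padding: the sum of
# (embs * mask) over the sequence, divided by the number of real tokens.
def _masked_mean_demo() -> None:
    embs = torch.ones(1, 4, 8)
    attention_mask = torch.tensor([[1, 1, 0, 0]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    assert pooled.shape == (1, 8)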
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A__ = os.path.join(UpperCamelCase__ , '''tmp''' )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A__ = read_json(os.path.join(UpperCamelCase__ , '''params.json''' ) )
A__ = NUM_SHARDS[model_size]
A__ = params['''n_layers''']
A__ = params['''n_heads''']
A__ = n_heads // num_shards
A__ = params['''dim''']
A__ = dim // n_heads
A__ = 10000.0
A__ = 1.0 / (base ** (torch.arange(0 , UpperCamelCase__ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ = params['''n_kv_heads'''] # for GQA / MQA
A__ = n_heads_per_shard // num_key_value_heads
A__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ = n_heads
A__ = n_heads_per_shard
A__ = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
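    # Note (added commentary, not in the original script): `permute` reorders the
    # rows of each attention head so that Meta's interleaved rotary layout
    # (real/imaginary pairs adjacent) becomes the half-split layout that the
    # Hugging Face rotary implementation expects. Roughly, per head, rows
    # (0, 1, 2, 3, ...) become (0, 2, 4, ..., 1, 3, 5, ...).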
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ = torch.load(os.path.join(UpperCamelCase__ , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
A__ = [
torch.load(os.path.join(UpperCamelCase__ , f"consolidated.{i:02d}.pth" ) , map_location='''cpu''' )
for i in range(UpperCamelCase__ )
]
A__ = 0
A__ = {'''weight_map''': {}}
for layer_i in range(UpperCamelCase__ ):
A__ = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ ) )
A__ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
A__ = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for i in range(UpperCamelCase__ )
] , dim=0 , ).reshape(UpperCamelCase__ , UpperCamelCase__ )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(UpperCamelCase__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(UpperCamelCase__ )] , dim=0 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(UpperCamelCase__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(UpperCamelCase__ )] , dim=0 )
A__ = inv_freq
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
A__ = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
A__ = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(UpperCamelCase__ )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(UpperCamelCase__ )] , dim=0 ),
}
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
# Write configs
A__ = {'''total_size''': param_count * 2}
write_json(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''pytorch_model.bin.index.json''' ) )
A__ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
A__ = params['''multiple_of'''] if '''multiple_of''' in params else 2_56
A__ = LlamaConfig(
hidden_size=UpperCamelCase__ , intermediate_size=compute_intermediate_size(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=UpperCamelCase__ , )
config.save_pretrained(UpperCamelCase__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
A__ = LlamaForCausalLM.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa , low_cpu_mem_usage=UpperCamelCase__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(UpperCamelCase__ , safe_serialization=UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
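

# --- Usage sketch (added commentary, not part of the original script) ---
# A typical invocation, assuming the Meta weights were downloaded locally:
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /output/path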
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with red blocks of
    minimum length three, any two blocks separated by at least one black square."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
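

# --- Sanity check (added, from the Project Euler 114 statement) ---
# A row measuring seven units allows exactly seventeen arrangements:
assert solution(7) == 17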
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
                 scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3)
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
return model
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
_a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
_a = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , )
_a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase : int = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
__UpperCAmelCase : Any = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
__UpperCAmelCase : Tuple = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : Any = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
__UpperCAmelCase : Any = self.prepare_image_inputs()
__UpperCAmelCase : List[Any] = image_processor(UpperCamelCase , return_tensors="""np""" )
__UpperCAmelCase : Any = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Dict = self.get_tokenizer()
__UpperCAmelCase : List[Any] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
__UpperCAmelCase : List[Any] = """lower newer"""
__UpperCAmelCase : int = processor(text=UpperCamelCase )
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Tuple = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
__UpperCAmelCase : List[str] = """lower newer"""
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Tuple = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
__UpperCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Tuple = processor.batch_decode(UpperCamelCase )
__UpperCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Any = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
__UpperCAmelCase : List[Any] = """lower newer"""
__UpperCAmelCase : List[Any] = self.prepare_image_inputs()
__UpperCAmelCase : List[str] = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int=None , UpperCamelCase : Tuple=None , UpperCamelCase : List[Any]=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : int = {}
if prompt is not None:
__UpperCAmelCase : int = prompt
if generate_kwargs is not None:
__UpperCAmelCase : Dict = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__UpperCAmelCase : List[Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__UpperCAmelCase : Dict = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Any , UpperCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase : str ):
'''simple docstring'''
return super().__call__(UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = load_image(UpperCamelCase )
if prompt is not None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(UpperCamelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__UpperCAmelCase : Dict = self.model.config.model_type
if model_type == "git":
__UpperCAmelCase : str = self.image_processor(images=UpperCamelCase , return_tensors=self.framework )
__UpperCAmelCase : Optional[Any] = self.tokenizer(text=UpperCamelCase , add_special_tokens=UpperCamelCase ).input_ids
__UpperCAmelCase : List[str] = [self.tokenizer.cls_token_id] + input_ids
__UpperCAmelCase : List[str] = torch.tensor(UpperCamelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__UpperCAmelCase : Optional[Any] = self.image_processor(images=UpperCamelCase , header_text=UpperCamelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__UpperCAmelCase : Any = self.image_processor(images=UpperCamelCase , return_tensors=self.framework )
__UpperCAmelCase : Dict = self.tokenizer(UpperCamelCase , return_tensors=self.framework )
model_inputs.update(UpperCamelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__UpperCAmelCase : Any = self.image_processor(images=UpperCamelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__UpperCAmelCase : Optional[int] = None
return model_inputs
def lowerCamelCase__ ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , UpperCamelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__UpperCAmelCase : str = None
if generate_kwargs is None:
__UpperCAmelCase : Any = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__UpperCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
__UpperCAmelCase : int = self.model.generate(UpperCamelCase , **UpperCamelCase , **UpperCamelCase )
return model_outputs
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
for output_ids in model_outputs:
__UpperCAmelCase : int = {
"""generated_text""": self.tokenizer.decode(
UpperCamelCase , skip_special_tokens=UpperCamelCase , )
}
records.append(UpperCamelCase )
return records
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit, exact erf-based form."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh-based approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with output clipped to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits the input in two halves along `axis`."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
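

# --- Sanity check (added, not part of the original module) ---
# The tanh-based approximation tracks the exact erf-based GELU closely:
if __name__ == "__main__":
    _xs = tf.linspace(-3.0, 3.0, 13)
    tf.debugging.assert_near(_gelu(_xs), _gelu_new(_xs), atol=1e-2)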
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
a_ : str = model(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = outputs.loss
a_ : str = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a_ : str = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a_ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
a_ : Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
a_ : List[Any] = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
a_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a_ : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
a_ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
a_ : Any = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
a_ : Tuple = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "all_results.json" ), "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 702 |
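For reference, the warmup scheduler requested above with `num_warmup_steps=0` ramps the learning rate multiplier and then decays it linearly. A minimal sketch of that schedule, mirroring the semantics of `get_linear_schedule_with_warmup` (the helper name `linear_lr_multiplier` is ours, not part of transformers):

def linear_lr_multiplier(step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)  # linear ramp 0 -> 1
    # linear decay 1 -> 0 over the remaining steps
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

assert linear_lr_multiplier(0, 0, 100) == 1.0  # with num_warmup_steps=0, training starts at the full base LR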
"""simple docstring"""
def get_min_or_max(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value: min_val must not be greater than max_val")
    return min_val if option else max_val
def get_avg(number_a: int, number_b: int) -> int:
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main() | 370 | 0 |
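The loop above is plain bisection, so it needs only about log2(higher - lower) guesses. A quick self-contained check of that bound (the helper `count_guesses` is ours, written to mirror the loop; it assumes `get_avg` from the snippet above is in scope):

def count_guesses(lower: int, higher: int, to_guess: int) -> int:
    guesses = 0
    while True:
        guesses += 1
        number = get_avg(lower, higher)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return guesses

print(count_guesses(1, 1_000, 777))  # roughly log2(1000) ~ 10 comparisons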
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 609 |
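Outside the test harness, the same checkpoint can be exercised directly. A minimal inference sketch (assumes `transformers` with its Flax extras is installed and the checkpoint, which only ships PyTorch weights, can be downloaded):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
encoded = tokenizer("Hello world", return_tensors="np")
hidden = model(**encoded).last_hidden_state
print(hidden.shape)  # (1, sequence_length, hidden_size)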
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 609 | 1 |
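A quick worked example of the cipher above (a sketch; it assumes the HillCipher class from the previous snippet is in scope):

import numpy

hill = HillCipher(numpy.array([[2, 5], [1, 6]]))  # det = 7, coprime with 36, so the key is valid
ciphertext = hill.encrypt("hello")
print(ciphertext)
print(hill.decrypt(ciphertext))  # "HELLOO": the plaintext, padded with its last char to the block size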
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 715 |
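The test builds BARTpho's two-file vocabulary by hand: a SentencePiece model plus a plain-text monolingual vocab. Loading the released checkpoint follows the same shape; a short sketch (assumes network access to the `vinai/bartpho-syllable` checkpoint):

from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))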
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 435 | 0 |
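The templating step can be exercised in isolation. A minimal check with hypothetical paths (it assumes `write_model_card` from the snippet above is in scope and writes somewhere disposable rather than the repo layout):

from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    write_model_card(Path(tmp) / "allenai" / "wmt16-en-de-12-1", "en", "de", "wmt16-en-de-12-1")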
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 242 |
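In practice the class above is reached through the `pipeline` factory. A short usage sketch (assumes network access; the ViLT checkpoint is the standard VQA example):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(
    vqa(
        image="http://images.cocodataset.org/val2017/000000039769.jpg",
        question="How many cats are there?",
        top_k=2,
    )
)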
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 242 | 1 |
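Concretely, the invariant the test asserts is that single sequences get wrapped as [CLS] ... [SEP] and pairs as [CLS] ... [SEP] ... [SEP]. A quick sketch to see it (assumes network access):

from transformers import DistilBertTokenizer

tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
ids = tok.encode("sequence builders")        # special tokens added by default
print(tok.convert_ids_to_tokens(ids))        # [CLS] and [SEP] wrap the word pieces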
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # ViTMAE uses random masking, so fix the noise to make PT/TF comparable
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 719 |
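The tester's sequence-length formula is worth seeing with numbers. For the defaults above (image_size=30, patch_size=2, mask_ratio=0.6), the encoder only sees the unmasked tokens plus [CLS]:

import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                    # 15 * 15 = 225
visible = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))   # ceil(0.4 * 226) = 91
print(num_patches, visible)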
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 577 | 0 |
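The nested-list text format above is how OwlViT takes multiple detection queries per image. A short usage sketch (assumes network access to the `google/owlvit-base-patch32` checkpoint):

from transformers import OwlViTProcessor
from PIL import Image
import requests

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)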
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution)) | 91 |
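To see the safety test in isolation, here is a tiny worked case on a 2x2 board (a sketch; it assumes `is_safe` from the snippet above is in scope):

small = [[0, 1], [0, 0]]      # a queen already placed at row 0, column 1
print(is_safe(small, 1, 1))   # False: same column as the existing queen
print(is_safe(small, 1, 0))   # False: the existing queen attacks along the diagonal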
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tuple=15 ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
# Simple input
_UpperCamelCase = '''This is a simple input'''
_UpperCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
_UpperCamelCase = ('''This is a simple input''', '''This is a pair''')
_UpperCamelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , )
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Running the tokenization tests with ftfy and spacy installed."""

    pass
| 98 | 0 |
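How the toy merge table above turns "lower" into ["low", "er</w>"] can be traced by hand. A simplified sketch of BPE merging (applying each merge rule left to right rather than the ranked pair selection real BPE uses, which gives the same result on this input):

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
symbols = ["l", "o", "w", "e", "r</w>"]   # "lower" split into chars, end-of-word marked
for a, b in merges:
    i = 0
    while i < len(symbols) - 1:
        if (symbols[i], symbols[i + 1]) == (a, b):
            symbols[i : i + 2] = [a + b]   # merge the adjacent pair
        else:
            i += 1
print(symbols)  # ['low', 'er</w>'] -> vocab ids [14, 15]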
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 613 |
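The attribute_map means the generic names resolve to the Pegasus-specific fields. A small sketch that builds a deliberately tiny model from this config (the reduced sizes are arbitrary, chosen only to keep it cheap):

from transformers import PegasusConfig, PegasusForConditionalGeneration

config = PegasusConfig(
    encoder_layers=2,
    decoder_layers=2,
    d_model=128,
    encoder_ffn_dim=256,
    decoder_ffn_dim=256,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
)
model = PegasusForConditionalGeneration(config)
print(config.hidden_size, config.num_attention_heads)  # 128 4, via the attribute_map aliases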
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 613 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
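# Session-scoped fixtures below build one small in-memory dataset and then
# materialize the same sample records on disk in many formats (arrow, csv,
# json/jsonl, parquet, sqlite, plain text) and archive wrappers (zip, tar,
# gzip, bz2, lz4, 7z, xz, zstd).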
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }
            ),
            'id': datasets.Value('int64'),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.txt'
    data = FILE_CONTENT
    with open(filename, 'w') as f:
        f.write(data)
    return filename


@pytest.fixture(scope='session')
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
    data = bytes(FILE_CONTENT, 'utf-8')
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
    data = bytes(FILE_CONTENT, 'utf-8')
    with gzip.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp('data') / 'file.txt.lz4'
        data = bytes(FILE_CONTENT, 'utf-8')
        with lz4.frame.open(path, 'wb') as f:
            f.write(data)
        return path


@pytest.fixture(scope='session')
def seven_zip_file(text_file, tmp_path_factory):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp('data') / 'file.txt.7z'
        with py7zr.SevenZipFile(path, 'w') as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope='session')
def tar_file(text_file, tmp_path_factory):
    import tarfile

    path = tmp_path_factory.mktemp('data') / 'file.txt.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope='session')
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp('data') / 'file.txt.xz'
    data = bytes(FILE_CONTENT, 'utf-8')
    with lzma.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def zip_file(text_file, tmp_path_factory):
    import zipfile

    path = tmp_path_factory.mktemp('data') / 'file.txt.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope='session')
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp('data') / 'file.txt.zst'
        data = bytes(FILE_CONTENT, 'utf-8')
        with zstd.open(path, 'wb') as f:
            f.write(data)
        return path


@pytest.fixture(scope='session')
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp('data') / 'file.xml'
    data = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
    with open(filename, 'w') as f:
        f.write(data)
return filename
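# Shared sample records: the path fixtures below serialize these constants so
# that every on-disk format contains the same rows.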
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='session' )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
return path
@pytest.fixture(scope='session' )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope='session')
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
return path
@pytest.fixture(scope='session' )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope='session')
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace('.csv', '.CSV')))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace('.csv', '.CSV')))
    return path


@pytest.fixture(scope='session')
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.join('main_dir', os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join('main_dir', os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        }
    )
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
return path
@pytest.fixture(scope='session' )
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
return path
@pytest.fixture(scope='session' )
def text_gz_path(text_path, tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def jsonl_gz_path(jsonl_path, tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
return path
@pytest.fixture(scope='session' )
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def abc_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path


@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path


@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
return path
@pytest.fixture(scope='session' )
def image_file():
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def audio_file():
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path


@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
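# Three processor test classes follow: a PyTorch/torchvision one, a TensorFlow
# one, and a cross-framework one that checks both backends post-process masks
# identically.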
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 475 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
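# Each optional backend (sentencepiece, tokenizers, torch, tf, flax) is probed
# below, and its symbols are appended to the lazy import structure only when
# the dependency is actually installed.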
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 661 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
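# This script aggregates doctest CI results from GitHub Actions artifacts and
# posts a summary (plus per-job threaded replies) to a Slack channel.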
def handle_test_results(test_results):
    expressions = test_results.split(' ')
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(r'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title, doc_test_results):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',')[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(':')

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f'{int(hours)}h{int(minutes)}m{int(seconds)}s'
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ''
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'*{category} failures*:'.ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]

        print('Sending the following payload')
        print(json.dumps({'blocks': payload}))

        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload)
    def post(self):
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))

        text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'

        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text)
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += f'*{key}*\n_{value}_\n\n'

        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}

        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')

        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = f'*Num failures* :{len(job_result["failed"])} \n'
                failures = job_result['failures']

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))

                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f'Results for {job}', blocks=blocks, thread_ts=self.thread_ts['ts'])

                time.sleep(1)
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}').json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})

        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {os.path.join(name, file)}.') from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({'name': self.name, 'path': path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
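    # Entry point: fetch job links and artifacts, bucket doctest failures by
    # category, then post the summary and threaded replies to Slack.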
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            'failed': [],
            'failures': {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')

    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '

        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')

                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 661 | 1 |
'''simple docstring'''
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    # number & -number isolates the lowest set bit; log2 gives its index.
    return 0 if (number == 0) else int(log2(number & -number))
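# Example: 36 == 0b100100, so the rightmost set bit is at index 2 and
# get_index_of_rightmost_set_bit(36) returns 2.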
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord('a')] = True
        elif char.isupper():
            flag[ord(char) - ord('A')] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark():
    from timeit import timeit

    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 446 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
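# MPNetModelTester builds tiny configs/inputs; MPNetModelTest wires it into the
# shared ModelTesterMixin/PipelineTesterMixin harness; the integration test at
# the bottom runs the real checkpoint.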
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained('microsoft/mpnet-base')

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 494 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
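# Example: binary_and(5, 3) == '0b001' (101 AND 011), and
# binary_and(25, 32) == '0b000000' since the two values share no set bits.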
if __name__ == "__main__":
import doctest
doctest.testmod()
| 494 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
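# DiffEdit tests: the fast tests run a tiny UNet/VAE/CLIP stack on CPU; the
# slow integration tests at the bottom exercise the full Stable Diffusion 2.1
# weights on GPU.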
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_zero=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, '_optional_components'):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f'`{optional_component}` did not stay set to None after loading.',
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = 'cpu'

        components = self.get_dummy_components()
        scheduler_args = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        components['scheduler'] = DPMSolverMultistepScheduler(**scheduler_args)
        components['inverse_scheduler'] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self) -> Tuple:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls) -> Optional[int]:
        '''simple docstring'''
        raw_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
        raw_image = raw_image.convert('RGB').resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self) -> Dict:
        '''simple docstring'''
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, output_type='numpy',
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self) -> str:
        '''simple docstring'''
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy',
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
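# For orientation, the three-stage DiffEdit flow these tests exercise, as a
# hedged standalone sketch (checkpoint and prompts are illustrative choices,
# not pinned by the test suite):
#
# pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#     'stabilityai/stable-diffusion-2-1', torch_dtype=torch.float16)
# mask = pipe.generate_mask(image=img, source_prompt='a bowl of fruit',
#                           target_prompt='a bowl of pears')          # 1. where to edit
# latents = pipe.invert(prompt='a bowl of fruit', image=img).latents  # 2. DDIM inversion
# edited = pipe(prompt='a bowl of pears', mask_image=mask,
#               image_latents=latents).images[0]                      # 3. masked denoising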
| 302 | """simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # output canvas, initialised to white
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
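# A vectorized NumPy equivalent of the per-pixel loop above -- a sketch using
# the same int(ratio * index) convention; `img` is any HxWx3 uint8 array.
def nearest_neighbour_fast(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    ys = (np.arange(dst_h) * img.shape[0] / dst_h).astype(int)  # source row per output row
    xs = (np.arange(dst_w) * img.shape[1] / dst_w).astype(int)  # source col per output col
    return img[ys[:, None], xs[None, :]]  # fancy indexing broadcasts to (dst_h, dst_w, 3)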
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
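# Programmatic equivalent of the CLI above, with hypothetical values for
# illustration (any tokenizer registered in TOKENIZER_CLASSES works the same way):
#
# convert_slow_checkpoint_to_fast(
#     tokenizer_name='BertTokenizer',
#     checkpoint_name='bert-base-uncased',
#     dump_path='./fast_tokenizers',
#     force_download=False,
# )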
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    '''simple docstring'''
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name')
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))
        results.update(result)
return results
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
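# For reference, a minimal standalone sketch of the masked-LM collator the script
# wires up ('bert-base-uncased' is an assumed checkpoint, not one the script pins):
#
# from transformers import AutoTokenizer, DataCollatorForLanguageModeling
# tok = AutoTokenizer.from_pretrained('bert-base-uncased')
# collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=True, mlm_probability=0.15)
# batch = collator([tok('hello world')['input_ids'], tok('a second sentence')['input_ids']])
# # batch['input_ids'] now has ~15% of tokens masked; batch['labels'] keeps the originals
# # (non-masked positions are set to -100 so they are ignored by the loss).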
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # note: `worflow_run_id` (sic) is the parameter name used by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
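# Hypothetical usage (artifact names vary by workflow; a GitHub token with read
# access to the repo's Actions artifacts is assumed):
#
# reports = get_last_daily_ci_reports(
#     artifact_names=['ci_results'], output_dir='./ci_artifacts', token=os.environ['GH_TOKEN'])
# # reports maps artifact name -> {member filename -> decoded text content}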
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """simple docstring"""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """simple docstring"""
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
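# Programmatic equivalent of the CLI above (paths are placeholders):
#
# convert_maskformer_checkpoint(
#     model_name='maskformer-swin-tiny-ade',
#     checkpoint_path='./MaskFormer-Swin-tiny-ADE20k/model.pkl',
#     pytorch_dump_folder_path='./maskformer-swin-tiny-ade',
#     push_to_hub=False,
# )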
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
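# Effect of the lazy pattern above, illustrated (hypothetical session): importing
# the package is cheap, and the heavy framework-specific submodule is only
# imported on first attribute access.
#
# import transformers.models.electra as electra   # no torch/tf/flax import yet
# model_cls = electra.ElectraModel                # triggers the modeling_electra import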
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = '',
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
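# Quick check of the 0-1000 box normalization above (values chosen for illustration):
# a box (left=200, top=300, right=300, bottom=350) on a 1000x2000 page becomes
# normalize_box([200, 300, 300, 350], width=1000, height=2000) == [200, 150, 300, 175]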
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
    print(positions)
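# For the inputs above, 'AB' occurs at indices 0 and 3 of 'ABAABA', so this prints:
# Pattern found in following positions:
# [0, 3]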
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self) -> Tuple:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self) -> Optional[int]:
        '''simple docstring'''
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels) -> Optional[Any]:
        '''simple docstring'''
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels) -> Optional[int]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self) -> Optional[Any]:
        '''simple docstring'''
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self) -> int:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds')
    def test_inputs_embeds(self) -> Optional[int]:
        '''simple docstring'''
        pass
    def test_model_common_attributes(self) -> Union[str, Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self) -> Tuple:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def A ( self : str ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img ( ) -> Union[str, Any]:
"""simple docstring"""
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def default_image_processor ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(2 )
_SCREAMING_SNAKE_CASE =ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 405 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial ( _a ):
return sum(DIGIT_FACTORIAL[d] for d in str(_a ) )
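# Project Euler 34: sum every number above 2 that equals the sum of the
# factorials of its digits. Any candidate has at most 7 digits, since an
# 8-digit number is at least 10_000_000 while 8 * 9! is only 2_903_040, so
# 7 * 9! + 1 below is a safe upper bound for the search.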
def solution ( ):
limit = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f'{solution() = }')
| 713 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax ( tax_checkpoint_path , config_name , flax_dump_folder_path ):
config = AutoConfig.from_pretrained(config_name )
flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
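# T5 v1.1 and LongT5 checkpoints use gated activations, so the MLP input
# projection is split into two kernels (wi_0 / wi_1) instead of a single wi.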
if config.model_type == "t5":
snake_case_ : str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ : List[str] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Dict = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
snake_case_ : Any = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
snake_case_ : List[str] = tax_attention_key
snake_case_ : Optional[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : str = tax_attention_value
snake_case_ : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
snake_case_ : Any = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : Union[str, Any] = tax_mlp_wi
snake_case_ : List[Any] = tax_mlp_wo
snake_case_ : int = tax_mlp_layer_norm
snake_case_ : Any = flax_model_encoder_layer_block
# Only for layer 0:
snake_case_ : Optional[int] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
snake_case_ : Tuple = tax_encoder_global_rel_embedding
# Assigning
snake_case_ : Dict = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
snake_case_ : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case_ : Tuple = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
snake_case_ : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
snake_case_ : Optional[Any] = tax_enc_dec_attention_module['''key''']['''kernel''']
snake_case_ : str = tax_enc_dec_attention_module['''out''']['''kernel''']
snake_case_ : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
snake_case_ : List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
snake_case_ : int = tax_attention_key
snake_case_ : List[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : Dict = tax_attention_value
snake_case_ : str = tax_pre_attention_layer_norm
snake_case_ : Any = tax_enc_dec_attention_key
snake_case_ : str = tax_enc_dec_attention_out
snake_case_ : int = tax_enc_dec_attention_query
snake_case_ : Any = tax_enc_dec_attention_value
snake_case_ : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
snake_case_ : Tuple = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : List[Any] = tax_mlp_wi
snake_case_ : Dict = tax_mlp_wo
snake_case_ : List[Any] = tax_mlp_layer_norm
snake_case_ : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
snake_case_ : str = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
snake_case_ : Tuple = tax_decoder_norm
# Only for layer 0:
snake_case_ : str = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
snake_case_ : Union[str, Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
snake_case_ : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowercase__ : Dict = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 485 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __a ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def A ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Dict ):
lowerCAmelCase_ : str = """<pad>"""
lowerCAmelCase_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def A ( self : Any ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(vocab_keys ) , 10_02 )
def A ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def A ( self : Optional[int] ):
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
lowerCAmelCase_ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
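# fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so the tokenizer shifts
# every raw SentencePiece id up by `fairseq_offset` (1) when converting tokens.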
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCAmelCase_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase_ : int = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A ( self : Optional[int] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCAmelCase_ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase_ : int = tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : Any = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : int = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase_ : Dict = tempfile.mkdtemp()
lowerCAmelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
lowerCAmelCase_ : Any = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : Dict = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
@cached_property
def A ( self : str ):
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def A ( self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase , f.name )
lowerCAmelCase_ : Optional[int] = XLMRobertaTokenizer(f.name , keep_accents=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = pickle.dumps(UpperCAmelCase )
pickle.loads(UpperCAmelCase )
def A ( self : List[str] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : int = self.get_rust_tokenizer()
lowerCAmelCase_ : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCAmelCase_ : str = tokenizer.tokenize(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : List[Any] = tokenizer.encode(UpperCAmelCase )
lowerCAmelCase_ : str = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = """Hello World!"""
lowerCAmelCase_ : Any = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase_ : Dict = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A ( self : List[Any] ):
# fmt: off
lowerCAmelCase_ : Optional[int] = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 600 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
__snake_case : Union[str, Any] = ComputeEnvironment.AMAZON_SAGEMAKER
__snake_case : Tuple = True
__snake_case : List[str] = """ml.p3.2xlarge"""
__snake_case : Optional[int] = """accelerate_sagemaker_execution_role"""
__snake_case : List[Any] = """hf-sm"""
__snake_case : str = """us-east-1"""
__snake_case : int = 1
__snake_case : int = """accelerate-sagemaker-1"""
__snake_case : Union[str, Any] = """1.6"""
__snake_case : Tuple = """4.4"""
__snake_case : List[str] = """train.py"""
success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __a ( unittest.TestCase ):
def A ( self : int ):
# Parse the mock training-script arguments into a dict and check the types
# inferred by `_convert_nargs_to_dict`.
converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , str )
assert isinstance(converted_args["""do_train"""] , bool )
assert isinstance(converted_args["""epochs"""] , int )
assert isinstance(converted_args["""learning_rate"""] , float )
assert isinstance(converted_args["""max_steps"""] , float )
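# The bare `--do_train` / `--do_predict` flags mixed with explicit values in
# fail_training_script_args make the list ambiguous, so parsing should raise.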
with pytest.raises(ValueError ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 600 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
'''simple docstring'''
def __init__( self , A__ , A__=13 , A__=3 , A__=True , A__=True , A__=0.1 , A__=0.1 , A__=224 , A__=1000 , A__=[3, 3, 6, 4] , A__=[48, 56, 112, 220] , ):
A__ : Tuple = parent
A__ : str = batch_size
A__ : int = num_channels
A__ : Union[str, Any] = is_training
A__ : Tuple = use_labels
A__ : List[str] = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : Dict = num_labels
A__ : List[Any] = image_size
A__ : List[Any] = layer_depths
A__ : str = embed_dims
def __A ( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=A__ , layer_scale_init_value=1e-5 , )
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = SwiftFormerModel(config=A__ )
model.to(A__ )
model.eval()
A__ : List[Any] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __A ( self , A__ , A__ , A__ ):
A__ : Tuple = self.num_labels
A__ : Tuple = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : int = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A__ : List[str] = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Optional[int] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
(config , pixel_values , labels) = self.prepare_config_and_inputs()
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__: Dict = False
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: Any = False
UpperCAmelCase__: int = False
UpperCAmelCase__: Any = False
def __A ( self ):
self.model_tester = SwiftFormerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(A__ )
A__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def __A ( self ):
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(A__ )
A__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Dict = [*signature.parameters.keys()]
A__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def __A ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def __A ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple = SwiftFormerModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __A ( self ):
pass
def __A ( self ):
def check_hidden_states_output(A__ , A__ , A__ ):
A__ : Any = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
A__ : Any = model(**self._prepare_for_class(A__ , A__ ) )
A__ : str = outputs.hidden_states
A__ : Tuple = 8
self.assertEqual(len(A__ ) , A__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(A__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : List[Any] = True
check_hidden_states_output(A__ , A__ , A__ )
def __A ( self ):
def _config_zero_init(A__ ):
A__ : str = copy.deepcopy(A__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(A__ , A__ , 1e-10 )
if isinstance(getattr(A__ , A__ , A__ ) , A__ ):
A__ : Optional[Any] = _config_zero_init(getattr(A__ , A__ ) )
setattr(A__ , A__ , A__ )
return configs_no_init
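# With every initializer range/std forced to ~0, a parameter whose mean does
# not round to 0.0 or 1.0 (LayerNorm weights) would reveal a module that
# ignores the configured initializer.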
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = _config_zero_init(A__ )
for model_class in self.all_model_classes:
A__ : str = model_class(config=A__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ):
pass
def UpperCamelCase () -> Optional[Any]:
A__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _a (unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor ( self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def __A ( self ):
A__ : List[Any] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(A__ )
A__ : Any = self.default_image_processor
A__ : List[Any] = prepare_img()
A__ : Optional[Any] = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
A__ : Dict = model(**A__ )
# verify the logits
A__ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
A__ : Any = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
| 64 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
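# getAffineTransform solves for the 2x3 matrix that maps three source points
# onto three destination points; warpAffine then applies it to the whole image.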
def get_rotation (img: np.ndarray , pts_src: np.ndarray , pts_dst: np.ndarray , rows: int , cols: int ) -> np.ndarray:
matrix = cv2.getAffineTransform(pts_src , pts_dst )
return cv2.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
image = cv2.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
img_rows, img_cols = gray_img.shape
# set different points to rotate image
pts_a = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts_b = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
pts_c = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
pts_d = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
# NOTE: the source/destination pairings below are reconstructed; any distinct
# pairing of the four point sets demonstrates the affine warp.
images = [
gray_img,
get_rotation(gray_img, pts_a, pts_b, img_rows, img_cols),
get_rotation(gray_img, pts_b, pts_c, img_rows, img_cols),
get_rotation(gray_img, pts_b, pts_d, img_rows, img_cols),
]
# plot different image rotations
fig = plt.figure(1)
titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__a: List[str] = logging.get_logger(__name__)
__a: Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__a: List[str] = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__a: Optional[Any] = {
"facebook/blenderbot_small-90M": 5_12,
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Any:
super().__init__(
ByteLevelBPETokenizer(
vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
self.add_prefix_space = add_prefix_space
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[str]:
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return output
return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
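# Blenderbot-small does not use token type ids: the mask built below is all
# zeros and only encodes the length of the sequence with special tokens added.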
def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 152 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
'''simple docstring'''
a : int = 0
b : bool = False
c : float = 3.0
class a ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ) -> Dict:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def __UpperCamelCase ( self ) -> int:
# Check that a GradScalerKwargs handler is applied to the scaler Accelerator creates.
_a : List[str] = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fp16 )
scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def __UpperCamelCase ( self ) -> Optional[Any]:
cmd = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
model = torch.nn.Linear(100, 200)
model = accelerator.prepare(model)
# Check the values changed in kwargs
error_msg = ""
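# bucket_cap_mb is stored internally in bytes, so convert back to MiB before
# comparing against the 15 passed to DistributedDataParallelKwargs above.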
observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 120 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
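# Base interface for transformers-cli subcommands: a static hook that attaches
# the subcommand's own argument parser, plus a run() entry point.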
class SCREAMING_SNAKE_CASE ( ABC ):
"""simple docstring"""
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( lowerCAmelCase : ArgumentParser ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
| 218 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time (__A : int ) -> str:
t = int(__A )
h , m , s = t // 3600, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar (value , total , prefix , label , width=300 ) -> str:
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table (items ) -> str:
html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar :
"""simple docstring"""
lowerCamelCase : Optional[int] =5
lowerCamelCase : Tuple =0.2
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase : int = 3_00 , ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = total
__lowerCAmelCase : Dict = """""" if prefix is None else prefix
__lowerCAmelCase : str = leave
__lowerCAmelCase : Optional[Any] = parent
__lowerCAmelCase : Optional[Any] = width
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : int , lowerCAmelCase : bool = False , lowerCAmelCase : str = None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : str = value
if comment is not None:
__lowerCAmelCase : Optional[Any] = comment
if self.last_value is None:
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : Optional[int] = value
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Any = self.warmup
__lowerCAmelCase : List[str] = 1
self.update_bar(lowerCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__lowerCAmelCase : Optional[Any] = time.time()
__lowerCAmelCase : Optional[int] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
__lowerCAmelCase : Optional[Any] = self.elapsed_time / (value - self.start_value)
else:
__lowerCAmelCase : str = None
if value >= self.total:
__lowerCAmelCase : Any = self.total
__lowerCAmelCase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__lowerCAmelCase : List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase )
__lowerCAmelCase : str = value
__lowerCAmelCase : Union[str, Any] = current_time
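# Throttle repaints: wait_for is the number of items between two update_bar
# calls, sized so the bar refreshes roughly every `update_every` seconds.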
if self.average_time_per_item is None:
__lowerCAmelCase : Optional[Any] = 1
else:
__lowerCAmelCase : List[str] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = """ """ * (len(str(self.total ) ) - len(str(lowerCAmelCase ) )) + str(lowerCAmelCase )
if self.elapsed_time is None:
__lowerCAmelCase : List[str] = f'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__lowerCAmelCase : Dict = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__lowerCAmelCase : Dict = (
f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
f''' {format_time(self.predicted_remaining )}'''
)
self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
self.display()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__lowerCAmelCase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class NotebookTrainingTracker ( NotebookProgressBar ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
super().__init__(lowerCAmelCase )
__lowerCAmelCase : str = None if column_names is None else [column_names]
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__lowerCAmelCase : Optional[int] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self.inner_table is None:
__lowerCAmelCase : Tuple = [list(values.keys() ), list(values.values() )]
else:
__lowerCAmelCase : Dict = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase )
__lowerCAmelCase : List[str] = columns
self.inner_table.append([values[c] for c in columns] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=3_00 ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = NotebookProgressBar(lowerCAmelCase , prefix=lowerCAmelCase , parent=self , width=lowerCAmelCase )
return self.child_bar
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = None
self.display()
class NotebookProgressCallback ( TrainerCallback ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = None
__lowerCAmelCase : Any = None
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , **lowerCAmelCase : Any ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : str = 0
__lowerCAmelCase : List[Any] = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
__lowerCAmelCase : int = NotebookTrainingTracker(state.max_steps , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , **lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__lowerCAmelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
if not has_length(lowerCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__lowerCAmelCase : List[str] = self.training_tracker.add_child(len(lowerCAmelCase ) )
else:
__lowerCAmelCase : List[Any] = NotebookProgressBar(len(lowerCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
__lowerCAmelCase : List[str] = None
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=None , **lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__lowerCAmelCase : List[str] = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step sine we're not in epoch eval strategy
__lowerCAmelCase : Tuple = state.global_step
self.training_tracker.write_line(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : int=None , **lowerCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
if self.training_tracker is not None:
__lowerCAmelCase : Union[str, Any] = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
__lowerCAmelCase : int = log["""loss"""]
break
if self.first_column == "Epoch":
__lowerCAmelCase : int = int(state.epoch )
else:
__lowerCAmelCase : Optional[int] = state.global_step
__lowerCAmelCase : Union[str, Any] = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
__lowerCAmelCase : Dict = re.sub(r"""\_loss$""" , """""" , lowerCAmelCase )
__lowerCAmelCase : Tuple = metrics.pop("""total_flos""" , lowerCAmelCase )
__lowerCAmelCase : List[Any] = metrics.pop("""epoch""" , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = metrics.pop(f'''{metric_key_prefix}_runtime''' , lowerCAmelCase )
__lowerCAmelCase : Tuple = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , lowerCAmelCase )
__lowerCAmelCase : List[Any] = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , lowerCAmelCase )
__lowerCAmelCase : Dict = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , lowerCAmelCase )
for k, v in metrics.items():
if k == f'''{metric_key_prefix}_loss''':
__lowerCAmelCase : Tuple = v
else:
__lowerCAmelCase : Any = k.split("""_""" )
__lowerCAmelCase : Optional[Any] = """ """.join([part.capitalize() for part in splits[1:]] )
__lowerCAmelCase : List[str] = v
self.training_tracker.write_line(lowerCAmelCase )
self.training_tracker.remove_child()
__lowerCAmelCase : int = None
# Evaluation takes a long time so we should force the next update.
__lowerCAmelCase : str = True
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , **lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = None
| 218 | 1 |