| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 120 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Iterative butterfly passes, halving the column count each step
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs pointwise, then invert to recover A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
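    # Hedged usage sketch: multiply A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x.
    # Coefficients are stored lowest power first, so the exact product is
    # 4 + 13x + 22x^2 + 15x^3, up to floating-point rounding.
    fft_demo = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
    print(fft_demo.product)  # approximately [(4+0j), (13+0j), (22+0j), (15+0j)]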
| 120 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Only show the progress bar on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
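if __name__ == "__main__":
    # Hedged usage sketch: with this signature, `main_process_only` is the
    # first positional argument, so the iterable is passed after it. On the
    # main process this behaves like tqdm.auto.tqdm; on other ranks the bar
    # is disabled.
    for _ in tqdm(True, range(10), desc="demo"):
        pass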
| 186 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    # Sieve of Eratosthenes over the odd numbers below `limit`
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    # Project Euler 50: the prime below `ceiling` that is writable as the
    # longest sum of consecutive primes
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'{solution() = }')
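    # Worked check (hedged): below 100 the longest run of consecutive primes
    # summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so solution(100)
    # should return 41.
    print(f"{solution(100) = }")  # expected: solution(100) = 41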
| 186 | 1 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # has an odd count.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
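    # Quick sanity checks (hedged; both implementations above should agree):
    # "Momo" has only even character counts, "Mother" has six odd ones.
    assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
    assert can_string_be_rearranged_as_palindrome("Momo") is True
    assert can_string_be_rearranged_as_palindrome_counter("Mother") is False
    assert can_string_be_rearranged_as_palindrome("Mother") is False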
| 484 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    # Split the loaded dataset into features and target
    return (data["data"], data["target"])


def xgboost(features, target):
    # Fit an XGBoost classifier on the training data
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main():
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 484 | 1 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    # Pick a Parquet row group size suited to the largest feature type
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        # Stream the underlying Arrow table to Parquet in row-group batches
        written = 0
        parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
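# Hedged usage sketch for ParquetDatasetWriter (the column names and output
# path below are illustrative only): write an in-memory Dataset to Parquet.
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# written = ParquetDatasetWriter(ds, "out.parquet").write()  # bytes written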
| 635 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    # Exactly one of the three concentrations must be 0 (the unknown one)
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
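    # Worked example (hedged): by the mass action law n * p = n_i**2, so with
    # hole_conc=4 and intrinsic_conc=2 the missing electron concentration is
    # 2**2 / 4 = 1.
    print(carrier_concentration(electron_conc=0, hole_conc=4, intrinsic_conc=2))
    # expected: ('electron_conc', 1.0)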
| 635 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 292 | 0 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    # Project Euler 86: least cuboid size M such that the number of cuboids
    # with an integer shortest path first exceeds `limit`
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class for a multimodal (modal + text) model; it copies the
    wrapped text config's attributes and adds a modal hidden size."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 85 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
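# Hedged instantiation sketch: the defaults above correspond to an
# EfficientNet-B7 style configuration; overriding image_size is shown for
# illustration only.
# config = EfficientNetConfig(image_size=224)
# onnx_config = EfficientNetOnnxConfig(config)
# list(onnx_config.inputs)  # -> ['pixel_values']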
| 470 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 470 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 712 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 190 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        # Grow a minimum spanning tree from the smallest vertex
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 347 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out tar members that would escape the extraction directory
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
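# Hedged usage sketch (the file names are illustrative): infer an archive's
# format and extract it to a target directory.
# fmt = Extractor.infer_extractor_format("archive.zip")  # -> "zip"
# if fmt:
#     Extractor.extract("archive.zip", "extracted_dir", extractor_format=fmt)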
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 582 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase = 'scheduler_config.json'
class _A ( UpperCAmelCase_ ):
lowercase_ : Tuple = 1
lowercase_ : Optional[Any] = 2
lowercase_ : Dict = 3
lowercase_ : List[str] = 4
lowercase_ : List[str] = 5
@dataclass
class _A ( UpperCAmelCase_ ):
lowercase_ : jnp.ndarray
class _A :
lowercase_ : Any = SCHEDULER_CONFIG_NAME
lowercase_ : str = ['''dtype''']
lowercase_ : Any = []
lowercase_ : Dict = True
@classmethod
def a ( cls : Union[str, Any] , lowerCamelCase__ : Dict[str, Any] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : Dict , ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : Any = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase__ , subfolder=lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCamelCase , __UpperCamelCase : Dict = cls.from_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , **lowerCamelCase__ )
if hasattr(lowerCamelCase__ , """create_state""" ) and getattr(lowerCamelCase__ , """has_state""" , lowerCamelCase__ ):
__UpperCamelCase : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def a ( self : str , lowerCamelCase__ : Union[str, os.PathLike] , lowerCamelCase__ : bool = False , **lowerCamelCase__ : List[str] ):
"""simple docstring"""
self.save_config(save_directory=lowerCamelCase__ , push_to_hub=lowerCamelCase__ , **lowerCamelCase__ )
@property
def a ( self : Any ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def a ( cls : str ):
"""simple docstring"""
__UpperCamelCase : Tuple = list(set([cls.__name__] + cls._compatibles ) )
__UpperCamelCase : Union[str, Any] = importlib.import_module(__name__.split(""".""" )[0] )
__UpperCamelCase : str = [
getattr(lowerCamelCase__ , lowerCamelCase__ ) for c in compatible_classes_str if hasattr(lowerCamelCase__ , lowerCamelCase__ )
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
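# Illustrative sanity check (added for this document, not part of the original
# module): the cosine schedule should produce betas that stay inside
# (0, max_beta]. Only jax.numpy from the imports above is assumed.
def _check_cosine_schedule_example() -> None:
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert float(betas.min()) > 0.0
    assert float(betas.max()) <= 0.999  # the min(..., max_beta) cap holds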
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
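# Minimal usage sketch (added for illustration, not part of the original file):
# when alphas_cumprod is all ones, the "noisy" sample is exactly the original
# sample, since no noise gets mixed in. The state here is built by hand rather
# than via CommonSchedulerState.create, which is an assumption for the demo.
def _add_noise_example() -> None:
    state = CommonSchedulerState(alphas=jnp.ones(4), betas=jnp.zeros(4), alphas_cumprod=jnp.ones(4))
    x0 = jnp.arange(8.0).reshape(2, 4)
    noise = jnp.ones_like(x0)
    timesteps = jnp.array([0, 3])
    noisy = add_noise_common(state, x0, noise, timesteps)
    assert jnp.allclose(noisy, x0)  # alpha_bar == 1 means zero noise weight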
| 269 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 269 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 720 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
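# Usage sketch (added for illustration; not part of the original file): the
# returned inverse x satisfies (a * x) % m == 1 whenever gcd(a, m) == 1.
def _mod_inverse_example() -> None:
    a, m = 7, 26
    inv = find_mod_inverse(a, m)  # 15, since 7 * 15 = 105 = 4 * 26 + 1
    assert (a * inv) % m == 1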
| 324 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 263 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
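# Illustrative mapping (added for this document, not part of the original
# script): a few raw FocalNet checkpoint keys and the Transformers-style names
# that rename_key produces for them.
def _rename_key_examples() -> None:
    assert rename_key("patch_embed.proj.weight") == "focalnet.embeddings.patch_embeddings.projection.weight"
    assert rename_key("layers.0.blocks.0.modulation.f.weight") == (
        "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"
    )
    assert rename_key("head.weight") == "classifier.weight"  # the head keeps no prefix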
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
a : Tuple = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 633 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
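# Usage sketch (added for illustration; not part of the original module):
def _require_version_examples() -> None:
    require_version("python>=3.7")    # compared against sys.version_info
    require_version_core("numpy")      # bare name: only checks installation
    try:
        require_version("numpy<0.1")   # unsatisfiable range raises ImportError
    except ImportError:
        pass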
| 520 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
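# Worked example (added for this document; not part of the original script):
# the same forward-difference scheme on fixed data instead of input(). For
# y = x**2 sampled at x = 0..3, interpolating at x = 2 must give exactly 4.
def _newton_forward_example() -> float:
    x = [0, 1, 2, 3]
    y = [[0.0] * 4 for _ in range(4)]
    for i, xi in enumerate(x):
        y[i][0] = float(xi * xi)  # 0, 1, 4, 9

    value = 2
    u = (value - x[0]) / (x[1] - x[0])
    for i in range(1, 4):
        for j in range(4 - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, 4):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    assert summ == 4.0
    return summ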
| 520 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
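# Usage sketch (added for illustration; the guarded wrapper is an assumption,
# not part of the original module): JIT-compiling the kernels requires a CUDA
# toolchain at runtime, so callers typically guard the import and fall back to
# the pure-PyTorch attention path when compilation fails.
def _try_load_kernels():
    try:
        return load_cuda_kernels()
    except Exception:
        return None  # caller uses the pure-PyTorch implementation instead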
| 428 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
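# Usage sketch (added for illustration; not part of the original module):
# pairing two independently defined configs into one composite config.
def _encoder_decoder_config_example():
    from transformers import BertConfig

    config_encoder = BertConfig()
    config_decoder = BertConfig()
    config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
    # the decoder half is rebuilt with the cross-attention flags switched on
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config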
| 420 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 507 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 507 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
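# Usage examples (added for this document; not part of the original module):
def _longest_subsequence_examples() -> None:
    assert longest_subsequence([9]) == [9]          # single element is returned as-is
    assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
    assert longest_subsequence([5, 1, 2]) == [1, 2]  # dropping the pivot wins here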
| 75 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 339 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class A_ :
@staticmethod
def lowerCAmelCase ( *SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
pass
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
_UpperCAmelCase : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : int = pipeline(
'document-question-answering' ,model=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = INVOICE_URL
__lowerCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'')))
__lowerCamelCase : Tuple = 'What is the placebo?'
__lowerCamelCase : int = [
{
'image': load_image(SCREAMING_SNAKE_CASE__),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Tuple = dqa_pipeline(SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(
SCREAMING_SNAKE_CASE__ ,[
[
{'score': ANY(SCREAMING_SNAKE_CASE__), 'answer': ANY(SCREAMING_SNAKE_CASE__), 'start': ANY(SCREAMING_SNAKE_CASE__), 'end': ANY(SCREAMING_SNAKE_CASE__)},
{'score': ANY(SCREAMING_SNAKE_CASE__), 'answer': ANY(SCREAMING_SNAKE_CASE__), 'start': ANY(SCREAMING_SNAKE_CASE__), 'end': ANY(SCREAMING_SNAKE_CASE__)},
]
]
* 3 ,)
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Dict = pipeline('document-question-answering' ,model='hf-internal-testing/tiny-random-layoutlmv2')
__lowerCamelCase : int = INVOICE_URL
__lowerCamelCase : List[Any] = 'How many cats are there?'
__lowerCamelCase : str = [
{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
{'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
]
__lowerCamelCase : Union[str, Any] = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = dqa_pipeline({'image': image, 'question': question} ,top_k=2)
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,SCREAMING_SNAKE_CASE__)
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__lowerCamelCase : Union[str, Any] = './tests/fixtures/tests_samples/COCO/000000039769.png'
__lowerCamelCase : Any = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,[])
# We can optionnally pass directly the words and bounding boxes
__lowerCamelCase : Optional[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png'
__lowerCamelCase : List[str] = []
__lowerCamelCase : Tuple = []
__lowerCamelCase : List[str] = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,words=SCREAMING_SNAKE_CASE__ ,boxes=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,[])
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,)
__lowerCamelCase : Any = INVOICE_URL
__lowerCamelCase : List[Any] = 'What is the invoice number?'
__lowerCamelCase : List[str] = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
__lowerCamelCase : Optional[Any] = dqa_pipeline({'image': image, 'question': question} ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
__lowerCamelCase : List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 ,)
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : str = pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,max_seq_len=5_0 ,)
__lowerCamelCase : Any = INVOICE_URL
__lowerCamelCase : List[Any] = 'What is the invoice number?'
__lowerCamelCase : Dict = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
__lowerCamelCase : Union[str, Any] = dqa_pipeline({'image': image, 'question': question} ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
__lowerCamelCase : List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=SCREAMING_SNAKE_CASE__ ,revision='3dc6de3' ,)
__lowerCamelCase : Tuple = INVOICE_URL
__lowerCamelCase : List[str] = 'What is the invoice number?'
__lowerCamelCase : str = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] ,)
__lowerCamelCase : List[str] = dqa_pipeline({'image': image, 'question': question} ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] ,)
__lowerCamelCase : List[Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 ,)
__lowerCamelCase : Dict = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'')))
# This model should also work if `image` is set to None
__lowerCamelCase : Any = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=SCREAMING_SNAKE_CASE__ ,revision='3dc6de3' ,max_seq_len=5_0 ,)
__lowerCamelCase : Tuple = INVOICE_URL
__lowerCamelCase : Union[str, Any] = 'What is the invoice number?'
__lowerCamelCase : Optional[Any] = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
__lowerCamelCase : List[str] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 ,)
__lowerCamelCase : List[str] = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,'')))
# This model should also work if `image` is set to None
__lowerCamelCase : Optional[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] ,)
@slow
@require_torch
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[Any] = pipeline(
'document-question-answering' ,model='naver-clova-ix/donut-base-finetuned-docvqa' ,tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa') ,feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' ,)
__lowerCamelCase : Tuple = INVOICE_URL
__lowerCamelCase : Dict = 'What is the invoice number?'
__lowerCamelCase : Dict = dqa_pipeline(image=SCREAMING_SNAKE_CASE__ ,question=SCREAMING_SNAKE_CASE__ ,top_k=2)
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[{'answer': 'us-001'}])
@require_tf
@unittest.skip('Document question answering not implemented in TF')
def lowerCAmelCase ( self : Dict):
pass
| 337 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 337 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
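# Usage sketch (added for this document; not part of the original script): two
# initializers with identical data but different names compare as equal,
# because the names are masked before the protobuf comparison and restored after.
def _tensor_proto_equality_example() -> None:
    from onnx import TensorProto, helper

    t1 = helper.make_tensor("weight_a", TensorProto.FLOAT, [2], [1.0, 2.0])
    t2 = helper.make_tensor("weight_b", TensorProto.FLOAT, [2], [1.0, 2.0])
    assert _is_equal_tensor_proto(t1, t2)
    assert t1.name == "weight_a"  # the original name survives the check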
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = os.path.dirname(_SCREAMING_SNAKE_CASE )
_A = os.path.basename(_SCREAMING_SNAKE_CASE )
_A = onnx.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_A = list(model.graph.initializer )
_A = set()
_A = {}
_A = []
_A = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_SCREAMING_SNAKE_CASE )
dup_set.add(_SCREAMING_SNAKE_CASE )
_A = inits[j].data_type
_A = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , _SCREAMING_SNAKE_CASE )
total_reduced_size += mem_size
_A = inits[i].name
_A = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_SCREAMING_SNAKE_CASE )
else:
_A = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB' )
_A = sorted(_SCREAMING_SNAKE_CASE )
_remove_dup_initializers_from_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_A = 'optimized_' + model_file_name
_A = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
onnx.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return new_model
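# Hedged usage sketch for the entry point above; "model.onnx" is a placeholder path,
# any exported ONNX file with duplicated weight tensors works. The optimized copy is
# written next to the input as "optimized_<name>".
#
#     new_path = remove_dup_initializers("model.onnx")
#     print("optimized model saved to", new_path)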
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , ):
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = 'gelu'
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmModel(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_A = True
_A = TFEsmModel(config=snake_case_ )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ , encoder_hidden_states=snake_case_ )
# Also check the case where encoder outputs are not passed
_A = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmForMaskedLM(config=snake_case_ )
_A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = TFEsmForTokenClassification(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = TFEsmModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFEsmModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_A = model.get_bias()
assert isinstance(snake_case_ , snake_case_ )
for k, v in name.items():
assert isinstance(snake_case_ , tf.Variable )
else:
_A = model.get_output_embeddings()
assert x is None
_A = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ):
_A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(snake_case_ )[0]
_A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , snake_case_ )
# compare the actual values for a slice.
_A = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCAmelCase__ ( self ):
_A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_A = model(snake_case_ )[0]
# compare the actual values for a slice.
_A = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
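# Standalone sketch of the masked-LM integration check above: the checkpoint name and
# the expected logits shape (1, 6, 33) both come from the test itself; running it
# downloads weights, so treat this as illustrative.
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]   # shape (1, 6, 33)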
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        # (do_normalize/do_center_crop values are assumed; the original call passed
        # explicit flags here)
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
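# Hedged usage sketch: `from_pretrained` is the standard ProcessorMixin constructor;
# the checkpoint name is an assumption and `image` stands for any PIL image.
#
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
#     # inputs now holds input_ids/attention_mask plus pixel_values (and pixel_mask)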
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
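# Effect of the lazy-module pattern above (illustrative): importing the package is
# cheap, and the heavy torch-backed symbols are only resolved on first access.
#
#     from transformers.models.cpmant import CpmAntConfig   # no torch import yet
#     from transformers.models.cpmant import CpmAntModel    # triggers the modeling import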
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Get raw characters from keyboard input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
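# Hedged usage sketch for the helpers above: poll keys until "q" is pressed. This
# needs a real TTY, and `get_character` may return KEYMAP["undefined"] (an int) for
# unprintable input, so compare against strings defensively.
#
#     while True:
#         key = get_character()
#         if key == "q":
#             break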
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
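# Hedged usage sketch: the helper above defers to `require_version`, which raises if
# the installed package does not satisfy the pin recorded in the deps table.
#
#     dep_version_check("numpy")                           # passes or raises
#     dep_version_check("numpy", "pip install -U numpy")   # with a custom hint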
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all contiguous character n-grams of a given size from a sentence.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
def __init__( self :Optional[int] , __snake_case :Optional[Any] , __snake_case :List[Any]=13 , __snake_case :str=7 , __snake_case :Tuple=True , __snake_case :Dict=True , __snake_case :Optional[Any]=True , __snake_case :str=True , __snake_case :int=99 , __snake_case :int=32 , __snake_case :Dict=2 , __snake_case :Optional[Any]=4 , __snake_case :Dict=37 , __snake_case :Optional[int]="gelu" , __snake_case :Tuple=0.1 , __snake_case :Tuple=0.1 , __snake_case :int=5_12 , __snake_case :int=16 , __snake_case :int=2 , __snake_case :Optional[int]=0.02 , __snake_case :Union[str, Any]=3 , __snake_case :Any=4 , __snake_case :str=None , __snake_case :str=0 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : int =batch_size
__magic_name__ : Any =seq_length
__magic_name__ : List[str] =is_training
__magic_name__ : Any =use_input_mask
__magic_name__ : Union[str, Any] =use_token_type_ids
__magic_name__ : Union[str, Any] =use_labels
__magic_name__ : Optional[Any] =vocab_size
__magic_name__ : Optional[Any] =hidden_size
__magic_name__ : Optional[Any] =num_hidden_layers
__magic_name__ : Optional[Any] =num_attention_heads
__magic_name__ : Tuple =intermediate_size
__magic_name__ : Tuple =hidden_act
__magic_name__ : Tuple =hidden_dropout_prob
__magic_name__ : Any =attention_probs_dropout_prob
__magic_name__ : Union[str, Any] =max_position_embeddings
__magic_name__ : int =type_vocab_size
__magic_name__ : Optional[Any] =type_sequence_label_size
__magic_name__ : Union[str, Any] =initializer_range
__magic_name__ : Optional[Any] =num_labels
__magic_name__ : Any =num_choices
__magic_name__ : Optional[Any] =scope
__magic_name__ : str =projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self :List[Any] , __snake_case :List[Any] , __snake_case :List[str] , __snake_case :Any , __snake_case :Any , __snake_case :Optional[Any] , __snake_case :Optional[Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : List[Any] =TFDPRContextEncoder(config=__snake_case )
__magic_name__ : Union[str, Any] =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__magic_name__ : str =model(__snake_case , token_type_ids=__snake_case )
__magic_name__ : Optional[int] =model(__snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def A__ ( self :int , __snake_case :str , __snake_case :Tuple , __snake_case :Optional[Any] , __snake_case :Optional[Any] , __snake_case :Tuple , __snake_case :List[Any] , __snake_case :int ):
'''simple docstring'''
__magic_name__ : int =TFDPRQuestionEncoder(config=__snake_case )
__magic_name__ : Union[str, Any] =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__magic_name__ : List[str] =model(__snake_case , token_type_ids=__snake_case )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def A__ ( self :List[Any] , __snake_case :List[str] , __snake_case :Optional[int] , __snake_case :str , __snake_case :Any , __snake_case :str , __snake_case :Optional[Any] , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =TFDPRReader(config=__snake_case )
__magic_name__ : List[str] =model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =TFDPRModelTester(self )
__magic_name__ : Optional[Any] =ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__snake_case )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__snake_case )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Dict =TFDPRContextEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Dict =TFDPRContextEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Union[str, Any] =TFDPRQuestionEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] =TFDPRReader.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_tf
class __A ( unittest.TestCase ):
@slow
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : int =TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
__magic_name__ : int =tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
__magic_name__ : Optional[Any] =model(__snake_case )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__magic_name__ : Tuple =tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
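# Standalone sketch of the DPR integration check above: the question encoder maps a
# tokenized question to a single 768-dim embedding. The checkpoint name comes from
# the test; the token ids below are shortened and purely illustrative.
#
#     model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     emb = model(tf.constant([[101, 7592, 1010, 102]]))[0]   # shape (1, 768)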
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class lowercase__ :
__UpperCAmelCase = field(
default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} )
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} )
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} )
__UpperCAmelCase = field(
default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} ,)
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class lowercase__ :
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} ,)
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
__UpperCAmelCase = field(
default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,)
__UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} ,)
__UpperCAmelCase = field(
default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
__UpperCAmelCase = field(
default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase__ ( A_ ):
__UpperCAmelCase = field(
default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCamelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_lowerCamelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
_lowerCamelCase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
_lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split )
_lowerCamelCase : Union[str, Any] = split["""train"""]
_lowerCamelCase : Optional[int] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : str = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
elif model_args.model_name_or_path:
_lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_lowerCamelCase : Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
elif model_args.model_name_or_path:
_lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case )
if training_args.do_train:
_lowerCamelCase : List[Any] = ds["""train"""].column_names
else:
_lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
_lowerCamelCase : str = data_args.image_column_name
elif "image" in column_names:
_lowerCamelCase : Optional[Any] = """image"""
elif "img" in column_names:
_lowerCamelCase : List[Any] = """img"""
else:
_lowerCamelCase : str = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_lowerCamelCase : Dict = image_processor.size["""shortest_edge"""]
else:
_lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
_lowerCamelCase : Tuple = Compose(
[
Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__snake_case : Optional[Any] ):
_lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_lowerCamelCase : Union[str, Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
_lowerCamelCase : Optional[Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
_lowerCamelCase : Optional[Any] = Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_lowerCamelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase : Union[str, Any] = last_checkpoint
_lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCamelCase : int = trainer.evaluate()
trainer.log_metrics("""eval""" , __snake_case )
trainer.save_metrics("""eval""" , __snake_case )
# Write model card and (optionally) push to hub
_lowerCamelCase : Optional[Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
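# Hedged launch sketch for the script above. Every flag maps to a field declared in
# the dataclasses in this file; the dataset name and output path are placeholders.
#
#   python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss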
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
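# Complexity sketch: with M persons and N tasks the memoised table holds
# 2**M * (N + 1) states and each state scans at most M persons, so the run time is
# O(2**M * N * M) and memory is O(2**M * N). A second tiny instance for illustration:
#
#     AssignmentUsingBitmask([[1], [2]], 2).count_no_of_ways([[1], [2]])  # -> 1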
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :List[str] , __lowercase :Any , __lowercase :str=100 , __lowercase :Dict=13 , __lowercase :Tuple=30 , __lowercase :Tuple=2 , __lowercase :Dict=3 , __lowercase :Optional[Any]=True , __lowercase :int=True , __lowercase :List[Any]=32 , __lowercase :str=4 , __lowercase :int=4 , __lowercase :List[str]=37 , __lowercase :Dict="gelu" , __lowercase :List[Any]=0.1 , __lowercase :Tuple=0.1 , __lowercase :Union[str, Any]=10 , __lowercase :Any=0.02 , __lowercase :Tuple=3 , __lowercase :Any=None , __lowercase :Optional[int]=[0, 1, 2, 3] , ):
__lowerCamelCase : int =parent
__lowerCamelCase : Optional[Any] =100
__lowerCamelCase : Optional[int] =batch_size
__lowerCamelCase : Tuple =image_size
__lowerCamelCase : Tuple =patch_size
__lowerCamelCase : Optional[Any] =num_channels
__lowerCamelCase : Union[str, Any] =is_training
__lowerCamelCase : int =use_labels
__lowerCamelCase : Optional[int] =hidden_size
__lowerCamelCase : Optional[Any] =num_hidden_layers
__lowerCamelCase : List[Any] =num_attention_heads
__lowerCamelCase : Any =intermediate_size
__lowerCamelCase : int =hidden_act
__lowerCamelCase : Tuple =hidden_dropout_prob
__lowerCamelCase : List[Any] =attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] =type_sequence_label_size
__lowerCamelCase : List[str] =initializer_range
__lowerCamelCase : Tuple =scope
__lowerCamelCase : Union[str, Any] =out_indices
__lowerCamelCase : Tuple =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Optional[int] =(image_size // patch_size) ** 2
__lowerCamelCase : Optional[Any] =num_patches + 1
def __lowercase ( self :int ):
__lowerCamelCase : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Any =None
__lowerCamelCase : str =None
if self.use_labels:
__lowerCamelCase : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase : List[str] =self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self :Tuple ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowercase ( self :Any , __lowercase :List[Any] , __lowercase :str , __lowercase :Union[str, Any] , __lowercase :Dict ):
__lowerCamelCase : Tuple =BeitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : int =model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self :Tuple , __lowercase :Dict , __lowercase :Tuple , __lowercase :Optional[Any] , __lowercase :Any ):
__lowerCamelCase : List[str] =BeitForMaskedImageModeling(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : int =model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowercase ( self :Dict , __lowercase :str , __lowercase :Tuple , __lowercase :int , __lowercase :str ):
__lowerCamelCase : int =self.type_sequence_label_size
__lowerCamelCase : Optional[Any] =BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Optional[Any] =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : int =1
__lowerCamelCase : str =BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Tuple =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : str =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self :List[Any] , __lowercase :Dict , __lowercase :List[str] , __lowercase :int , __lowercase :str ):
__lowerCamelCase : Union[str, Any] =self.num_labels
__lowerCamelCase : Any =BeitForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCamelCase : Any =model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__lowerCamelCase : Optional[Any] =model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Optional[int] =self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str =config_and_inputs
__lowerCamelCase : List[Any] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : List[Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__snake_case : Any = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__snake_case : Union[str, Any] = False
__snake_case : Optional[Any] = False
__snake_case : str = False
def __lowercase ( self :Tuple ):
__lowerCamelCase : Optional[Any] =BeitModelTester(self )
__lowerCamelCase : Optional[int] =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def __lowercase ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __lowercase ( self :List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self :Union[str, Any] ):
pass
def __lowercase ( self :Any ):
__lowerCamelCase , __lowerCamelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[int] =model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Union[str, Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def __lowercase ( self :Any ):
__lowerCamelCase , __lowerCamelCase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Tuple =model_class(__lowercase )
__lowerCamelCase : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple =[*signature.parameters.keys()]
__lowerCamelCase : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def __lowercase ( self :str ):
__lowerCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowercase ( self :str ):
__lowerCamelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def __lowercase ( self :Dict ):
__lowerCamelCase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def __lowercase ( self :str ):
if not self.model_tester.is_training:
return
__lowerCamelCase , __lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[Any] =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]:
continue
__lowerCamelCase : Any =model_class(__lowercase )
model.to(__lowercase )
model.train()
__lowerCamelCase : Dict =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__lowerCamelCase : List[Any] =model(**__lowercase ).loss
loss.backward()
def __lowercase ( self :List[Any] ):
__lowerCamelCase , __lowerCamelCase : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCamelCase : str =False
__lowerCamelCase : int =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCamelCase : Any =model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
__lowerCamelCase : Any =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__lowerCamelCase : Union[str, Any] =model(**__lowercase ).loss
loss.backward()
def __lowercase ( self :Dict ):
__lowerCamelCase , __lowerCamelCase : str =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Tuple =_config_zero_init(__lowercase )
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] =model_class(config=__lowercase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __lowercase ( self :int ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : str =BeitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self :Optional[Any] ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowercase ( self :Any ):
__lowerCamelCase : Union[str, Any] =BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__lowercase )
__lowerCamelCase : Union[str, Any] =self.default_image_processor
__lowerCamelCase : List[str] =prepare_img()
__lowerCamelCase : Tuple =image_processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# prepare bool_masked_pos
__lowerCamelCase : Dict =torch.ones((1, 196) , dtype=torch.bool ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[Any] =model(pixel_values=__lowercase , bool_masked_pos=__lowercase )
__lowerCamelCase : List[Any] =outputs.logits
# verify the logits
__lowerCamelCase : str =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : Tuple =torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowercase , atol=1e-2 ) )
@slow
def __lowercase ( self :Tuple ):
__lowerCamelCase : Optional[int] =BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__lowercase )
__lowerCamelCase : str =self.default_image_processor
__lowerCamelCase : List[Any] =prepare_img()
__lowerCamelCase : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : int =model(**__lowercase )
__lowerCamelCase : List[Any] =outputs.logits
# verify the logits
__lowerCamelCase : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : Optional[Any] =torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__lowerCamelCase : str =281
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def __lowercase ( self :int ):
__lowerCamelCase : Optional[int] =BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__lowercase )
__lowerCamelCase : str =self.default_image_processor
__lowerCamelCase : str =prepare_img()
__lowerCamelCase : int =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : str =model(**__lowercase )
__lowerCamelCase : int =outputs.logits
# verify the logits
__lowerCamelCase : Dict =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : List[Any] =torch.tensor([1.6881, -0.2787, 0.5901] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) )
__lowerCamelCase : List[Any] =2396
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Tuple =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
__lowerCamelCase : int =model.to(__lowercase )
__lowerCamelCase : Tuple =BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__lowerCamelCase : Optional[Any] =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase : Optional[int] =Image.open(ds[0]['''file'''] )
__lowerCamelCase : Union[str, Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[Any] =model(**__lowercase )
__lowerCamelCase : Tuple =outputs.logits
# verify the logits
__lowerCamelCase : Optional[Any] =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __lowercase )
__lowerCamelCase : List[Any] =version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
__lowerCamelCase : Any =torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__lowercase , )
else:
__lowerCamelCase : int =torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) )
@slow
def __lowercase ( self :Any ):
__lowerCamelCase : Any =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
__lowerCamelCase : Union[str, Any] =model.to(__lowercase )
__lowerCamelCase : List[str] =BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__lowerCamelCase : List[str] =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__lowerCamelCase : Optional[Any] =Image.open(ds[0]['''file'''] )
__lowerCamelCase : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase )
# forward pass
with torch.no_grad():
__lowerCamelCase : Tuple =model(**__lowercase )
__lowerCamelCase : str =outputs.logits.detach().cpu()
__lowerCamelCase : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(500, 300)] )
__lowerCamelCase : Union[str, Any] =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __lowercase )
__lowerCamelCase : Optional[Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__lowerCamelCase : List[str] =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 363 | 1 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a_ = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __UpperCAmelCase ( __UpperCamelCase ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__lowercase : List[Any] = list(s_dict.keys() )
for key in keys:
__lowercase : Optional[int] = R'''.*/layers_(\d+)'''
__lowercase : Tuple = key
if re.match(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , __UpperCamelCase )
__lowercase : str = R'''(encoder|decoder)\/'''
if re.match(__UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = re.match(__UpperCamelCase , __UpperCamelCase ).groups()
if groups[0] == "encoder":
__lowercase : Dict = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , __UpperCamelCase )
__lowercase : Union[str, Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , __UpperCamelCase )
elif groups[0] == "decoder":
__lowercase : Optional[Any] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , __UpperCamelCase )
__lowercase : Dict = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , __UpperCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__lowercase : Optional[Any] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
print(f"""{key} -> {new_key}""" )
__lowercase : Union[str, Any] = s_dict.pop(__UpperCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase : Optional[Any] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowercase : List[str] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__lowercase : List[str] = s_dict[key].shape[0]
__lowercase : str = s_dict[key]
for idx in range(__UpperCamelCase ):
__lowercase : str = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(__UpperCamelCase )
return s_dict
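# Worked example (hypothetical key, tracing the regexes above): a T5X key such as
#     "encoder/layers_0/mlp/wi/kernel"
# first becomes "encoder/block/0/layer/mlp/wi/kernel" via the layers_(\d+) rewrite
# and then "encoder/block/0/layer/1/mlp/wi/kernel", since encoder MLP sub-blocks
# sit at index 1 (decoder MLP sub-blocks get index 2).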
a_ = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(__UpperCamelCase , '''r''' ) as f:
__lowercase : Dict = f.read()
__lowercase : Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , __UpperCamelCase )
__lowercase : Union[str, Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__lowercase : Tuple = float(__UpperCamelCase ) if '''.''' in value else int(__UpperCamelCase )
__lowercase : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , __UpperCamelCase )[0]
__lowercase : Optional[int] = str(activation[1] )
__lowercase : int = num_experts
__lowercase : Optional[int] = SwitchTransformersConfig(**__UpperCamelCase )
return config
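# Sketch of the gin format this parser assumes (hypothetical excerpt): a line like
#     NUM_HEADS = 12
# is captured by the "(.*) = ([0-9.]*)" regex and, through GIN_TO_CONFIG_MAPPING,
# lands in the SwitchTransformersConfig as num_heads=12.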
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ):
# Initialise PyTorch model
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
__lowercase : Optional[int] = checkpoints.load_tax_checkpoint(__UpperCamelCase )
if gin_file is not None:
__lowercase : Union[str, Any] = convert_gin_to_config(__UpperCamelCase , __UpperCamelCase )
else:
__lowercase : List[Any] = SwitchTransformersConfig.from_pretrained(__UpperCamelCase )
__lowercase : Optional[Any] = SwitchTransformersForConditionalGeneration(__UpperCamelCase )
__lowercase : int = flax_params['''target''']
__lowercase : List[Any] = flatten_dict(__UpperCamelCase , sep='''/''' )
__lowercase : int = rename_keys(__UpperCamelCase )
__lowercase : Any = unflatten_dict(__UpperCamelCase , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
a_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
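# Example invocation (script name and paths are illustrative placeholders):
#     python convert_switch_transformers_checkpoint.py \
#         --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --gin_file /path/to/config.gin \
#         --pytorch_dump_folder_path ./switch-base-8 \
#         --num_experts 8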
| 76 |
'''simple docstring'''
import numpy as np
def lowercase__ ( __UpperCamelCase : np.array ):
'''simple docstring'''
    return 1 / (1 + np.exp(-__UpperCamelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
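    # Hypothetical sanity check: the sigmoid of 0 is exactly 0.5 and the output
    # saturates toward 0 and 1 at the tails.
    print(lowercase__(np.array([-1.0, 0.0, 1.0])))  # ~[0.26894142, 0.5, 0.73105858]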
| 566 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['pixel_values']
def __init__(self , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = 8 , **__lowercase , ):
super().__init__(**__lowercase )
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_pad
__lowerCAmelCase = pad_size
def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase = None ):
__lowerCAmelCase , __lowerCAmelCase = get_image_size(__lowercase )
__lowerCAmelCase = (old_height // size + 1) * size - old_height
__lowerCAmelCase = (old_width // size + 1) * size - old_width
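        # Illustrative arithmetic: with size=8, a height of 29 yields
        # (29 // 8 + 1) * 8 - 29 = 3 rows of padding, while a height that is already
        # a multiple of 8 (e.g. 32) still receives a full extra block of 8 rows.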
return pad(__lowercase , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=__lowercase )
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_pad if do_pad is not None else self.do_pad
__lowerCAmelCase = pad_size if pad_size is not None else self.pad_size
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_pad:
__lowerCAmelCase = [self.pad(__lowercase , size=__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
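# Typical usage (illustrative): constructing this processor with do_pad=True and
# pad_size=8 and calling it on a PIL image returns "pixel_values" whose height and
# width are symmetrically padded up to the next multiple of 8, as
# super-resolution models such as Swin2SR expect.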
| 705 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_UpperCAmelCase : str = ["""gpt2"""]
_UpperCAmelCase : Optional[int] = """gpt2"""
if is_tf_available():
class a__ ( tf.Module ):
"""simple docstring"""
def __init__(self , __lowercase ):
super().__init__()
__lowerCAmelCase = tokenizer
__lowerCAmelCase = AutoConfig.from_pretrained(__lowercase )
__lowerCAmelCase = TFGPTaLMHeadModel.from_config(__lowercase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = self.tokenizer(__lowercase )
__lowerCAmelCase = tokenized['''input_ids'''].to_tensor()
__lowerCAmelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
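        # `to_tensor()` densifies the ragged token ids with zero padding, and the
        # attention mask is recovered as 1 wherever an id is non-zero, so the whole
        # string -> logits path stays inside one exportable tf.function graph.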
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCAmelCase = self.model(input_ids=__lowercase , attention_mask=__lowercase )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().setUp()
__lowerCAmelCase = [GPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCAmelCase = [TFGPTaTokenizer.from_pretrained(__lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCAmelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case (self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCAmelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCAmelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCAmelCase = python_outputs[key].numpy()
__lowerCAmelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__lowercase , tf.intaa ) == tf_outputs_values ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf.function(__lowercase )
for test_inputs in self.test_sentences:
__lowerCAmelCase = tf.constant(__lowercase )
__lowerCAmelCase = compiled_tokenizer(__lowercase )
__lowerCAmelCase = tf_tokenizer(__lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = ModelToSave(tokenizer=__lowercase )
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = model.serving(__lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCAmelCase = Path(__lowercase ) / '''saved.model'''
tf.saved_model.save(__lowercase , __lowercase , signatures={'''serving_default''': model.serving} )
__lowerCAmelCase = tf.saved_model.load(__lowercase )
__lowerCAmelCase = loaded_model.signatures['''serving_default'''](__lowercase )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = tf_tokenizer(__lowercase ) # Build model with some sample inputs
__lowerCAmelCase = tf_tokenizer.get_config()
__lowerCAmelCase = TFGPTaTokenizer.from_config(__lowercase )
__lowerCAmelCase = model_from_config(__lowercase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _snake_case (self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCAmelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCAmelCase = tf_tokenizer(__lowercase , max_length=__lowercase )
__lowerCAmelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 474 | 0 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __lowerCAmelCase = "base_exp.txt" ) -> int:
SCREAMING_SNAKE_CASE__ : float = 0
SCREAMING_SNAKE_CASE__ : Tuple = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , __lowerCAmelCase ) ) ):
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = list(map(int , line.split(""",""" ) ) )
if x * logaa(__lowerCAmelCase ) > largest:
SCREAMING_SNAKE_CASE__ : Tuple = x * logaa(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = i + 1
return result
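# Why logarithms: base**exp would be astronomically large for these inputs, but
# log10 is monotonic, so comparing exp * log10(base) ranks the pairs identically
# without any big-number arithmetic.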
if __name__ == "__main__":
print(solution())
| 680 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a :str = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
a :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowercase__ ):
UpperCAmelCase : Union[str, Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self :List[str] ,*__UpperCAmelCase :List[Any] ,**__UpperCAmelCase :Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self ,['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowercase_ ( cls :Optional[int] ,*__UpperCAmelCase :List[Any] ,**__UpperCAmelCase :Tuple ) -> str:
"""simple docstring"""
requires_backends(cls ,['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowercase_ ( cls :str ,*__UpperCAmelCase :str ,**__UpperCAmelCase :Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''transformers''', '''torch''', '''note_seq'''] )
| 715 | """simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
UpperCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCAmelCase = field(
default=_lowerCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def __a ( ):
"""simple docstring"""
lowerCamelCase__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase__ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase__ : int = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase__ : Dict = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : Optional[int] = train_dataset.features['''label'''].names
if training_args.do_eval:
lowerCamelCase__ : Optional[Any] = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : str = eval_dataset.features['''label'''].names
if training_args.do_predict:
lowerCamelCase__ : Tuple = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : int = predict_dataset.features['''label'''].names
# Labels
lowerCamelCase__ : Optional[int] = len(_lowercase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel={str(_lowercase ): label for i, label in enumerate(_lowercase )} , labelaid={label: i for i, label in enumerate(_lowercase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase__ : Dict = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase__ : Tuple = False
def preprocess_function(_lowercase ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=_lowercase , max_length=data_args.max_seq_length , truncation=_lowercase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase__ : List[Any] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase__ : Tuple = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase__ : Any = train_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowercase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase__ : List[Any] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase__ : Union[str, Any] = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase__ : int = eval_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase__ : List[Any] = min(len(_lowercase ) , data_args.max_predict_samples )
lowerCamelCase__ : int = predict_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
lowerCamelCase__ : Any = predict_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
lowerCamelCase__ : Dict = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowercase ):
lowerCamelCase__ : List[Any] = p.predictions[0] if isinstance(p.predictions , _lowercase ) else p.predictions
lowerCamelCase__ : List[Any] = np.argmax(_lowercase , axis=1 )
return metric.compute(predictions=_lowercase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase__ : str = default_data_collator
elif training_args.fpaa:
lowerCamelCase__ : Dict = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 )
else:
lowerCamelCase__ : Dict = None
# Initialize our Trainer
lowerCamelCase__ : List[str] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase__ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ : Tuple = last_checkpoint
lowerCamelCase__ : Any = trainer.train(resume_from_checkpoint=_lowercase )
lowerCamelCase__ : Union[str, Any] = train_result.metrics
lowerCamelCase__ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase__ : Optional[Any] = min(_lowercase , len(_lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase__ : Any = trainer.evaluate(eval_dataset=_lowercase )
lowerCamelCase__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase__ : str = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = trainer.predict(_lowercase , metric_key_prefix='''predict''' )
lowerCamelCase__ : Dict = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowercase )
)
lowerCamelCase__ : Any = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''predict''' , _lowercase )
trainer.save_metrics('''predict''' , _lowercase )
lowerCamelCase__ : Optional[Any] = np.argmax(_lowercase , axis=1 )
lowerCamelCase__ : Tuple = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_lowercase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_lowercase ):
lowerCamelCase__ : Optional[Any] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
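# Example invocation (hyper-parameters are illustrative, not prescriptive):
#     python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#         --language de --train_language en --do_train --do_eval \
#         --output_dir /tmp/debug_xnli --per_device_train_batch_size 32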
| 121 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 488 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case_ ( __A ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = 8
# DPR tok
lowerCamelCase_ : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase_ : Optional[int] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase_ : List[str] = os.path.join(__magic_name__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowerCamelCase_ : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCamelCase_ : str = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCamelCase_ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCamelCase_ : Any = {"unk_token": "<unk>"}
lowerCamelCase_ : Dict = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase_ : Tuple = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ : int = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
lowerCamelCase_ : Optional[Any] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
lowerCamelCase_ : str = self.get_dummy_dataset()
lowerCamelCase_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowerCamelCase_ : Tuple = dataset
lowerCamelCase_ : Optional[int] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : bool ) -> List[Any]:
lowerCamelCase_ : Optional[int] = self.get_dummy_dataset()
lowerCamelCase_ : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowerCamelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , "dataset" )
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowerCamelCase_ : Optional[int] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCamelCase_ : List[Any] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __magic_name__ ) , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : str = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ : List[str] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowerCamelCase_ : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowerCamelCase_ : Optional[int] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(__magic_name__ , open(__magic_name__ , "wb" ) )
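        # The legacy layout written above pairs a serialized FAISS index
        # (*.index.dpr) with a pickled id -> [text, title] passage mapping,
        # mirroring the original DPR release format.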
lowerCamelCase_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowerCamelCase_ : Any = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : int = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
lowerCamelCase_ : Dict = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowerCamelCase_ : Dict = self.get_dummy_dataset()
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : List[Any] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : Optional[int] = 1
lowerCamelCase_ : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
lowerCamelCase_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[int] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
lowerCamelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : List[Any] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : int = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
lowerCamelCase_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : str = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : List[Any] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Dict = self.get_dummy_legacy_index_retriever()
lowerCamelCase_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
lowerCamelCase_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : Optional[int] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : Optional[Any] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
import torch
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : List[str] = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ : str = [[5, 7], [10, 11]]
lowerCamelCase_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : Dict = retriever(__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertIsInstance(__magic_name__ , np.ndarray )
lowerCamelCase_ : Dict = retriever(
__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ , return_tensors="pt" , )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__magic_name__ , torch.Tensor )
self.assertIsInstance(__magic_name__ , torch.Tensor )
self.assertIsInstance(__magic_name__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
lowerCamelCase_ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
retriever.set_ctx_encoder_tokenizer(__magic_name__ )
lowerCamelCase_ : List[Any] = [[5, 7], [10, 11]]
lowerCamelCase_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : int = retriever(__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ )
self.assertEqual(
len(__magic_name__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , __magic_name__ ) # check for doc token related keys in dictionary.
| 488 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_,snake_case_=0.9_99,snake_case_="cosine",):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_A : Tuple = []
for i in range(snake_case_ ):
_A : str = i / num_diffusion_timesteps
_A : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ),snake_case_ ) )
return torch.tensor(snake_case_,dtype=torch.floataa )
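# Each beta above is 1 - alpha_bar(t2) / alpha_bar(t1) for consecutive normalized
# timesteps, clipped by the second argument (0.999 by default) so no single step
# removes all signal; this discretizes the continuous cosine / exp alpha-bar curve.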
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in KarrasDiffusionSchedulers]
_a = 2
@register_to_config
def __init__( self , _a = 1000 , _a = 0.00085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = False , _a = False , _a = 1.0 , _a = "linspace" , _a = 0 , ) -> int:
if trained_betas is not None:
_A : Optional[int] = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
_A : List[str] = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A : int = betas_for_alpha_bar(_a , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
_A : List[str] = betas_for_alpha_bar(_a , alpha_transform_type="""exp""" )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_A : Tuple = 1.0 - self.betas
_A : Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
_A : Any = use_karras_sigmas
def a__ ( self , _a , _a=None ) -> str:
if schedule_timesteps is None:
_A : Optional[Any] = self.timesteps
_A : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_A : List[Any] = 1 if len(_a ) > 1 else 0
else:
_A : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_A : Any = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a__ ( self ) -> Any:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
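        # With "linspace" or "trailing" spacing the largest sigma is the initial
        # noise scale directly; otherwise it is inflated to sqrt(sigma_max**2 + 1).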
def a__ ( self , _a , _a , ) -> torch.FloatTensor:
_A : Dict = self.index_for_timestep(_a )
_A : Optional[int] = self.sigmas[step_index]
_A : Any = sample / ((sigma**2 + 1) ** 0.5)
return sample
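        # Dividing by (sigma**2 + 1) ** 0.5 brings the noisy sample back to roughly
        # unit variance before the model sees it, the input scaling this family of
        # k-diffusion style schedulers assumes.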
def a__ ( self , _a , _a = None , _a = None , ) -> Union[str, Any]:
_A : str = num_inference_steps
_A : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A : Dict = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A : Any = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : Any = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A : Dict = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : Tuple = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_A : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_A : Optional[int] = np.log(_a )
_A : List[str] = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
if self.config.use_karras_sigmas:
_A : str = self._convert_to_karras(in_sigmas=_a , num_inference_steps=self.num_inference_steps )
_A : str = np.array([self._sigma_to_t(_a , _a ) for sigma in sigmas] )
_A : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_A : str = torch.from_numpy(_a ).to(device=_a )
_A : int = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_A : Dict = torch.from_numpy(_a )
_A : str = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
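        # Sigmas and timesteps are interleaved (inner values repeated twice) because
        # Heun's method queries the model twice per interval: an Euler predictor
        # step followed by a second-order corrector step.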
if str(_a ).startswith("""mps""" ):
# mps does not support float64
_A : Tuple = timesteps.to(_a , dtype=torch.floataa )
else:
_A : Optional[int] = timesteps.to(device=_a )
# empty dt and derivative
_A : Union[str, Any] = None
_A : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A : Dict = defaultdict(_a )
def a__ ( self , _a , _a ) -> List[Any]:
# get log sigma
_A : str = np.log(_a )
# get distribution
_A : List[str] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_A : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_A : Any = low_idx + 1
_A : Dict = log_sigmas[low_idx]
_A : List[str] = log_sigmas[high_idx]
# interpolate sigmas
_A : Tuple = (low - log_sigma) / (low - high)
_A : Optional[int] = np.clip(_a , 0 , 1 )
# transform interpolation to time range
_A : List[Any] = (1 - w) * low_idx + w * high_idx
_A : str = t.reshape(sigma.shape )
return t
def a__ ( self , _a , _a ) -> torch.FloatTensor:
_A : float = in_sigmas[-1].item()
_A : float = in_sigmas[0].item()
_A : List[str] = 7.0 # 7.0 is the value used in the paper
_A : Optional[int] = np.linspace(0 , 1 , _a )
_A : Optional[Any] = sigma_min ** (1 / rho)
_A : Optional[Any] = sigma_max ** (1 / rho)
_A : Union[str, Any] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def a__ ( self ) -> Dict:
return self.dt is None
def a__ ( self , _a , _a , _a , _a = True , ) -> Union[SchedulerOutput, Tuple]:
_A : str = self.index_for_timestep(_a )
# advance index counter by 1
_A : Optional[int] = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A : Optional[Any] = self.sigmas[step_index]
_A : Any = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_A : Optional[int] = self.sigmas[step_index - 1]
_A : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A : Union[str, Any] = 0
_A : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A : List[str] = sigma_hat if self.state_in_first_order else sigma_next
_A : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
_A : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_A : Optional[Any] = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
_A : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A : Any = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A : Tuple = sigma_next - sigma_hat
# store for 2nd order step
_A : List[Any] = derivative
_A : Dict = dt
_A : Dict = sample
else:
# 2. 2nd order / Heun's method
_A : Dict = (sample - pred_original_sample) / sigma_next
_A : Dict = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_A : Any = self.dt
_A : List[str] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_A : List[str] = None
_A : List[Any] = None
_A : List[Any] = None
_A : List[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def a__ ( self , _a , _a , _a , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_A : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_A : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_A : str = self.timesteps.to(original_samples.device )
_A : List[str] = timesteps.to(original_samples.device )
_A : Union[str, Any] = [self.index_for_timestep(_a , _a ) for t in timesteps]
_A : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_A : Optional[int] = sigma.unsqueeze(-1 )
_A : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
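

# A minimal, self-contained sketch of the two-stage update the scheduler above spreads
# across two `step` calls (Heun's method / improved Euler). The ODE function `f`, the
# sigma pair, and all names below are illustrative assumptions, not part of the API.
import numpy as np


def heun_step(x: np.ndarray, sigma: float, sigma_next: float, f) -> np.ndarray:
    d1 = f(x, sigma)  # first-order (Euler) slope at the current sigma
    x_euler = x + d1 * (sigma_next - sigma)
    if sigma_next == 0:
        return x_euler  # common k-diffusion convention: plain Euler on the last step
    d2 = f(x_euler, sigma_next)  # slope re-evaluated at the Euler prediction
    return x + 0.5 * (d1 + d2) * (sigma_next - sigma)  # average the two slopes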
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
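
# The block above is the `_LazyModule` pattern: heavy submodules are only imported when
# an attribute is first touched. A minimal sketch of the same idea using `importlib`;
# the wrapper class and the json example below are made up for illustration.
import importlib


class LazyAttr:
    def __init__(self, module_name: str, attr_name: str):
        self._module_name = module_name
        self._attr_name = attr_name
        self._obj = None

    def __call__(self, *args, **kwargs):
        if self._obj is None:  # the import happens on first use, not at definition time
            module = importlib.import_module(self._module_name)
            self._obj = getattr(module, self._attr_name)
        return self._obj(*args, **kwargs)


lazy_dumps = LazyAttr("json", "dumps")
print(lazy_dumps({"lazy": True}))  # json is imported only at this call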
| 54 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = "mobilenet_v2"
def __init__( self : str , _snake_case : Union[str, Any]=3 , _snake_case : Optional[int]=224 , _snake_case : str=1.0 , _snake_case : Union[str, Any]=8 , _snake_case : List[Any]=8 , _snake_case : Tuple=6 , _snake_case : Optional[Any]=32 , _snake_case : Any=True , _snake_case : Tuple=True , _snake_case : Tuple="relu6" , _snake_case : Optional[int]=True , _snake_case : List[str]=0.8 , _snake_case : Any=0.02 , _snake_case : Dict=0.001 , _snake_case : List[str]=255 , **_snake_case : Any , ) -> List[Any]:
super().__init__(**_snake_case )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = depth_multiplier
SCREAMING_SNAKE_CASE__ = depth_divisible_by
SCREAMING_SNAKE_CASE__ = min_depth
SCREAMING_SNAKE_CASE__ = expand_ratio
SCREAMING_SNAKE_CASE__ = output_stride
SCREAMING_SNAKE_CASE__ = first_layer_is_expansion
SCREAMING_SNAKE_CASE__ = finegrained_output
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = tf_padding
SCREAMING_SNAKE_CASE__ = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = semantic_loss_ignore_index
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = version.parse("1.11" )
@property
def lowerCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowerCAmelCase_ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowerCAmelCase_ ( self : Optional[Any] ) -> float:
return 1e-4
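

# The OnnxConfig above only marks axis 0 ("batch") as dynamic. A rough sketch of how such
# a mapping is consumed by `torch.onnx.export`; the helper name, dummy shape and file path
# are assumptions for illustration, not taken from this config.
import torch


def export_with_dynamic_batch(model: torch.nn.Module, path: str = "model.onnx") -> None:
    dummy = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
    torch.onnx.export(
        model,
        dummy,
        path,
        input_names=["pixel_values"],
        output_names=["logits"],
        # leaving axis 0 symbolic lets the exported graph accept any batch size
        dynamic_axes={"pixel_values": {0: "batch"}, "logits": {0: "batch"}},
    )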
| 159 |
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
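
    # Why `check_partition_perfect` works: for odd k, the candidate p = (k**2 - 1) / 4
    # gives sqrt(4 * p + 1) / 2 + 1 / 2 = (k + 1) / 2, so p is "perfect" exactly when
    # (k + 1) / 2 is a power of two. A quick self-check of that claim (values assumed):
    for k in (3, 7, 15, 31):  # k = 2**j - 1, so (k + 1) / 2 = 2**(j - 1)
        assert check_partition_perfect((k**2 - 1) // 4)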
| 159 | 1 |
def rank_of_matrix(matrix):
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[i], matrix[row] = matrix[row], matrix[i]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
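
    # A quick cross-check of the elimination-based rank above against numpy; the test
    # matrices are made up for illustration. Copies are passed because the function
    # mutates its argument in place.
    import numpy as np

    for m in ([[1.0, 2.0], [2.0, 4.0]], [[1.0, 0.0], [0.0, 1.0]]):
        assert rank_of_matrix([r[:] for r in m]) == np.linalg.matrix_rank(np.array(m))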
| 708 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A__: Union[str, Any] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """albert"""
def __init__( self: Dict , __lowerCamelCase: int=3_0000 , __lowerCamelCase: Dict=128 , __lowerCamelCase: Optional[int]=4096 , __lowerCamelCase: Optional[int]=12 , __lowerCamelCase: List[Any]=1 , __lowerCamelCase: List[Any]=64 , __lowerCamelCase: Optional[Any]=1_6384 , __lowerCamelCase: int=1 , __lowerCamelCase: List[str]="gelu_new" , __lowerCamelCase: Optional[int]=0 , __lowerCamelCase: Optional[Any]=0 , __lowerCamelCase: Union[str, Any]=512 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Any=1e-12 , __lowerCamelCase: int=0.1 , __lowerCamelCase: Dict="absolute" , __lowerCamelCase: List[str]=0 , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Dict=3 , **__lowerCamelCase: int , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: Any = vocab_size
UpperCamelCase__: str = embedding_size
UpperCamelCase__: Optional[Any] = hidden_size
UpperCamelCase__: Any = num_hidden_layers
UpperCamelCase__: str = num_hidden_groups
UpperCamelCase__: int = num_attention_heads
UpperCamelCase__: Union[str, Any] = inner_group_num
UpperCamelCase__: str = hidden_act
UpperCamelCase__: Tuple = intermediate_size
UpperCamelCase__: Dict = hidden_dropout_prob
UpperCamelCase__: List[Any] = attention_probs_dropout_prob
UpperCamelCase__: List[str] = max_position_embeddings
UpperCamelCase__: Optional[Any] = type_vocab_size
UpperCamelCase__: Any = initializer_range
UpperCamelCase__: int = layer_norm_eps
UpperCamelCase__: List[str] = classifier_dropout_prob
UpperCamelCase__: str = position_embedding_type
class _a ( UpperCamelCase__):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__: Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__: Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 221 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any]=1E-12 ):
UpperCamelCase_ : int = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
UpperCamelCase_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_SCREAMING_SNAKE_CASE , axis=1 ) , a_min=_SCREAMING_SNAKE_CASE ) ).T
return jnp.matmul(_SCREAMING_SNAKE_CASE , norm_emb_a.T )
class UpperCamelCase ( nn.Module ):
a__ :CLIPConfig
a__ :jnp.dtype = jnp.floataa
def A_ (self ) -> Optional[int]:
UpperCamelCase_ : Tuple = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase_ : Dict = nn.Dense(self.config.projection_dim , use_bias=__UpperCamelCase , dtype=self.dtype )
UpperCamelCase_ : Any = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
UpperCamelCase_ : str = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase_ : str = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
UpperCamelCase_ : Optional[int] = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__(self , __UpperCamelCase ) -> Optional[Any]:
UpperCamelCase_ : int = self.vision_model(__UpperCamelCase )[1]
UpperCamelCase_ : Tuple = self.visual_projection(__UpperCamelCase )
UpperCamelCase_ : Any = jax_cosine_distance(__UpperCamelCase , self.special_care_embeds )
UpperCamelCase_ : Dict = jax_cosine_distance(__UpperCamelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase_ : Optional[int] = 0.0
UpperCamelCase_ : str = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase_ : Dict = jnp.round(__UpperCamelCase , 3 )
UpperCamelCase_ : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__UpperCamelCase )
# Use a lower threshold if an image has any special care concept
UpperCamelCase_ : List[str] = is_special_care * 0.01
UpperCamelCase_ : int = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase_ : Dict = jnp.round(__UpperCamelCase , 3 )
UpperCamelCase_ : str = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class UpperCamelCase ( __a ):
a__ :Tuple = CLIPConfig
a__ :Optional[int] = '''clip_input'''
a__ :Union[str, Any] = FlaxStableDiffusionSafetyCheckerModule
def __init__(self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = jnp.floataa , __UpperCamelCase = True , **__UpperCamelCase , ) -> int:
if input_shape is None:
UpperCamelCase_ : str = (1, 224, 224, 3)
UpperCamelCase_ : Dict = self.module_class(config=__UpperCamelCase , dtype=__UpperCamelCase , **__UpperCamelCase )
super().__init__(__UpperCamelCase , __UpperCamelCase , input_shape=__UpperCamelCase , seed=__UpperCamelCase , dtype=__UpperCamelCase , _do_init=_do_init )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None ) -> FrozenDict:
# init input tensor
UpperCamelCase_ : Optional[int] = jax.random.normal(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_,UpperCamelCase_ : str = jax.random.split(__UpperCamelCase )
UpperCamelCase_ : Tuple = {"""params""": params_rng, """dropout""": dropout_rng}
UpperCamelCase_ : List[str] = self.module.init(__UpperCamelCase , __UpperCamelCase )["""params"""]
return random_params
def __call__(self , __UpperCamelCase , __UpperCamelCase = None , ) -> List[Any]:
UpperCamelCase_ : str = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__UpperCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 635 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase ( __a ):
def A_ (self ) -> Any:
UpperCamelCase_ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """num_attention_heads""" ) )
class UpperCamelCase :
def __init__(self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=64 , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=16 , __UpperCamelCase=[128, 256, 384] , __UpperCamelCase=[4, 6, 8] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=[16, 16, 16] , __UpperCamelCase=0 , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , ) -> Optional[int]:
UpperCamelCase_ : Tuple = parent
UpperCamelCase_ : Optional[Any] = batch_size
UpperCamelCase_ : Dict = image_size
UpperCamelCase_ : Dict = num_channels
UpperCamelCase_ : Optional[Any] = kernel_size
UpperCamelCase_ : int = stride
UpperCamelCase_ : str = padding
UpperCamelCase_ : Tuple = hidden_sizes
UpperCamelCase_ : int = num_attention_heads
UpperCamelCase_ : List[str] = depths
UpperCamelCase_ : Dict = key_dim
UpperCamelCase_ : Any = drop_path_rate
UpperCamelCase_ : List[Any] = patch_size
UpperCamelCase_ : Any = attention_ratio
UpperCamelCase_ : Optional[Any] = mlp_ratio
UpperCamelCase_ : Optional[int] = initializer_range
UpperCamelCase_ : Optional[Any] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ : Tuple = is_training
UpperCamelCase_ : Any = use_labels
UpperCamelCase_ : Dict = num_labels
UpperCamelCase_ : List[str] = initializer_range
def A_ (self ) -> Dict:
UpperCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : List[str] = None
if self.use_labels:
UpperCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ : Any = self.get_config()
return config, pixel_values, labels
def A_ (self ) -> Optional[int]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
UpperCamelCase_ : int = LevitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : List[Any] = model(__UpperCamelCase )
UpperCamelCase_ : int = (self.image_size, self.image_size)
UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ : Union[str, Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ : List[Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
UpperCamelCase_ : List[str] = self.num_labels
UpperCamelCase_ : Any = LevitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : int = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ (self ) -> str:
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : Any = config_and_inputs
UpperCamelCase_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __a , __a , unittest.TestCase ):
a__ :Any = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a__ :str = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ :Optional[Any] = False
a__ :Optional[int] = False
a__ :Tuple = False
a__ :List[str] = False
a__ :Dict = False
def A_ (self ) -> List[Any]:
UpperCamelCase_ : int = LevitModelTester(self )
UpperCamelCase_ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def A_ (self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ (self ) -> Optional[Any]:
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def A_ (self ) -> int:
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def A_ (self ) -> Any:
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def A_ (self ) -> List[str]:
pass
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(__UpperCamelCase )
UpperCamelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Dict = [*signature.parameters.keys()]
UpperCamelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def A_ (self ) -> Optional[Any]:
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ : Optional[Any] = outputs.hidden_states
UpperCamelCase_ : Optional[int] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase_ : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ : Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ : List[str] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ),
                [height * width, self.model_tester.hidden_sizes[0]],
            )
UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A_ (self ) -> Dict:
pass
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
UpperCamelCase_ : List[str] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A_ (self ) -> Tuple:
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def A_ (self ) -> List[Any]:
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def A_ (self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_,UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : List[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCamelCase_ : int = model(**__UpperCamelCase ).loss
loss.backward()
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ : str = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : Any = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCamelCase_ : Any = problem_type["""title"""]
UpperCamelCase_ : Dict = problem_type["""num_labels"""]
UpperCamelCase_ : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ : Optional[int] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
UpperCamelCase_ : Union[str, Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
UpperCamelCase_ : Tuple = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
UpperCamelCase_ : str = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A_ (self ) -> Dict:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Any = LevitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase_ ( ):
UpperCamelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def A_ (self ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A_ (self ) -> str:
UpperCamelCase_ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = self.default_image_processor
UpperCamelCase_ : List[str] = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase_ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ : Any = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 635 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Tuple , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = True , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = size if size is not None else {"shortest_edge": 224}
__snake_case = get_size_dict(a_ , default_to_square=a_ )
__snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
__snake_case = get_size_dict(a_ , default_to_square=a_ , param_name="crop_size" )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def A ( self : str , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__snake_case = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def A ( self : int , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ):
"""simple docstring"""
__snake_case = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def A ( self : str , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Tuple , ):
"""simple docstring"""
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def A ( self : Optional[Any] , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ):
"""simple docstring"""
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def A ( self : Dict , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : int = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a_ : Any , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(a_ , param_name="size" , default_to_square=a_ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(a_ , param_name="crop_size" , default_to_square=a_ )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(a_ ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
__snake_case = [to_channel_dimension_format(a_ , a_ ) for image in images]
__snake_case = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
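

# The processor above boils down to resize -> center-crop -> rescale -> normalise. A
# minimal numpy/PIL sketch of the same chain; the square resize (no aspect-preserving
# crop) is a simplifying assumption, and the mean/std are the usual OpenAI CLIP constants.
import numpy as np
from PIL import Image


def simple_clip_preprocess(img: Image.Image, size: int = 224) -> np.ndarray:
    img = img.convert("RGB").resize((size, size), Image.BICUBIC)
    x = np.asarray(img).astype(np.float32) / 255.0  # rescale to [0, 1]
    mean = np.array([0.48145466, 0.4578275, 0.40821073])  # OPENAI_CLIP_MEAN
    std = np.array([0.26862954, 0.26130258, 0.27577711])  # OPENAI_CLIP_STD
    x = (x - mean) / std  # per-channel normalisation
    return x.transpose(2, 0, 1)  # HWC -> CHW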
| 680 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
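

# Usage of the copy helper above: every field is deep-copied into a fresh instance, so
# mutating the clone cannot leak back into the original. The tiny dataclass below is a
# hypothetical stand-in, not the config defined above.
from dataclasses import field


@dataclass
class _Cfg:
    tags: list = field(default_factory=list)

    def clone(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


c1 = _Cfg(tags=["a"])
c2 = c1.clone()
c2.tags.append("b")
assert c1.tags == ["a"]  # deep copy: the original list is untouched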
| 680 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCAmelCase = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    """simple docstring"""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        """simple docstring"""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """simple docstring"""
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 216 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_a = logging.get_logger(__name__)
_a = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
_a = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
_a = {
"""RUCAIBox/mvp""": 1024,
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ['input_ids', 'attention_mask']
lowercase__ = MvpTokenizer
def __init__( self , __a=None , __a=None , __a=None , __a="replace" , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=False , __a=True , **__a , ) -> List[str]:
'''simple docstring'''
super().__init__(
__a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
_UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __a) != add_prefix_space:
_UpperCamelCase = getattr(__a , pre_tok_state.pop('''type'''))
_UpperCamelCase = add_prefix_space
_UpperCamelCase = pre_tok_class(**__a)
_UpperCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCamelCase = '''post_processor'''
_UpperCamelCase = getattr(self.backend_tokenizer , __a , __a)
if tokenizer_component_instance:
_UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCamelCase = tuple(state['''sep'''])
if "cls" in state:
_UpperCamelCase = tuple(state['''cls'''])
_UpperCamelCase = False
if state.get('''add_prefix_space''' , __a) != add_prefix_space:
_UpperCamelCase = add_prefix_space
_UpperCamelCase = True
if state.get('''trim_offsets''' , __a) != trim_offsets:
_UpperCamelCase = trim_offsets
_UpperCamelCase = True
if changes_to_apply:
_UpperCamelCase = getattr(__a , state.pop('''type'''))
_UpperCamelCase = component_class(**__a)
setattr(self.backend_tokenizer , __a , __a)
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a) if isinstance(__a , __a) else value
_UpperCamelCase = value
def UpperCAmelCase ( self , *__a , **__a) -> BatchEncoding:
'''simple docstring'''
_UpperCamelCase = kwargs.get('''is_split_into_words''' , __a)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''')
return super()._batch_encode_plus(*__a , **__a)
def UpperCAmelCase ( self , *__a , **__a) -> BatchEncoding:
'''simple docstring'''
_UpperCamelCase = kwargs.get('''is_split_into_words''' , __a)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''')
return super()._encode_plus(*__a , **__a)
def UpperCAmelCase ( self , __a , __a = None) -> Tuple[str]:
'''simple docstring'''
_UpperCamelCase = self._tokenizer.model.save(__a , name=__a)
return tuple(__a)
def UpperCAmelCase ( self , __a , __a=None) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , __a , __a = None) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
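
# The two methods above produce the BART/MVP special-token layout: a single sequence is
# <s> A </s>, a pair is <s> A </s> </s> B </s>, and token_type_ids are all zero. A
# schematic check with hypothetical ids (0 = <s>, 2 = </s>):
bos, eos = [0], [2]
seq_a, seq_b = [10, 11], [20]
pair = bos + seq_a + eos + eos + seq_b + eos  # mirrors build_inputs_with_special_tokens
assert pair == [0, 10, 11, 2, 2, 20, 2]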
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_a = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""PerceiverFeatureExtractor"""]
_a = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 1 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # finished_process[i] is 1 once process i has finished and 0 while it is pending.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]

                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
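
    # HRRN always dispatches the ready process with the highest response ratio
    #     R = (waiting_time + burst_time) / burst_time
    # which is the `temp` computed inside calculate_turn_around_time. A tiny worked
    # example at current_time = 5 with two hypothetical ready processes:
    for arrival, burst in ((1, 4), (3, 1)):
        waiting = 5 - arrival
        print(F'R = {(waiting + burst) / burst:.2f}')  # 2.00 vs 3.00 -> short job first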
| 172 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """simple docstring"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
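

# `show_frequency_response` probes a filter with a unit impulse and plots the FFT of the
# response. Any object with a `process(sample) -> float` method satisfies the FilterType
# protocol; the one-pole low-pass below is an assumed example (the 0.9 pole is arbitrary).
class OnePoleLowpass:
    def __init__(self, a: float = 0.9) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = (1 - a) * x[n] + a * y[n - 1]
        self.prev = (1 - self.a) * sample + self.a * self.prev
        return self.prev


if __name__ == "__main__":
    show_frequency_response(OnePoleLowpass(), 48_000)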
| 172 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path,
    encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size,
    adapter_stride, decoder_start_token_id, encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1] ),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = 'mbart50'
    config["feature_extractor_type"] = 'wav2vec2'

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
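# Example invocation (illustrative; the script file name and the local
# fairseq checkpoint/dict/config paths below are hypothetical):
#
#   python convert_mbart_wav2vec2_seq2seq.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50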
| 714 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0 )) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
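# Usage sketch (illustrative, not part of the original module): project a
# hidden state to Student's t parameters and build the distribution. The
# batch and feature sizes below are arbitrary.
#
# output = StudentTOutput(dim=1)
# proj = output.get_parameter_projection(in_features=32)
# distr_args = proj(torch.randn(8, 32))    # (df, loc, scale), each of shape (8,)
# distr = output.distribution(distr_args)  # StudentT over each batch element
# sample = distr.sample()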
| 137 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least value of M such that the number of cuboids with integer
    shortest-path distance and sides 1 <= a, b, c <= M first exceeds `limit`
    (Project Euler problem 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
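# Worked check (illustrative): for max_cuboid_size M = 3 and
# sum_shortest_sides a + b = 4, sqrt(4**2 + 3**2) = 5 is an integer, and
# min(3, 4 // 2) - max(1, 4 - 3) + 1 = 2 counts the two valid splits
# (a, b) = (1, 3) and (2, 2).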
| 50 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure FeaturesManager.determine_framework returns the intended framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment, TensorFlow mocked as unavailable -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 279 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating the list and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums via in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
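# Quick equivalence check (illustrative): both strategies enumerate the same
# 3! = 6 orderings, up to the order of the output list.
#
# assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))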
| 99 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer: NFKC normalization plus
    whitespace, digit and punctuation pre-tokenization.
    """

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f'$A {self.special_tokens["eos"]["token"]}', special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )

        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens['unk']['id']

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
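# Usage sketch (illustrative; the corpus path is hypothetical):
#
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train("corpus.txt", vocab_size=8000)
# encoding = tokenizer.encode("Hello world")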
| 602 | 0 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return F"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return F"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope='class')
def sm_env(request):
    """Provide the SageMaker test environment to request.cls."""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 130 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
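# Usage sketch (illustrative): the fast tokenizer is normally loaded from a
# pretrained checkpoint rather than constructed by hand.
#
# tok = RobertaTokenizerFast.from_pretrained("roberta-base")
# ids = tok("Hello world")["input_ids"]  # wrapped in <s> ... </s> (ids 0 and 2)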
| 130 | 1 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
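# Worked check (illustrative): two 3.2 ohm resistors.
#
# resistor_parallel([3.2, 3.2])  # 1 / (1/3.2 + 1/3.2) = 1.6
# resistor_series([3.2, 3.2])    # 3.2 + 3.2 = 6.4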
| 51 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
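# Usage sketch (illustrative):
#
# config = CamembertConfig()  # RoBERTa-style defaults, e.g. 12 hidden layers
# small = CamembertConfig(num_hidden_layers=4, hidden_size=256)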
| 49 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')

        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original weights into our VisionEncoderDecoderModel structure.
    """
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
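# Example invocation (illustrative; the script file name is hypothetical and
# the URL is simply the parser default shown above):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten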
| 461 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 461 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (a single O(n) pass)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
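# Worked check (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0, both functions return 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0.
# Horner's method needs only n multiplications and additions, with no
# exponentiation per term.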
| 677 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :int = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase :Optional[Any] = True
if kwargs.get('''max_value''' , snake_case__ ) is not None:
lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :Optional[int] = kwargs['''max_value''']
if kwargs.get('''min_value''' , snake_case__ ) is not None:
lowercase :List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :str = kwargs['''min_value''']
lowercase :Any = list(snake_case__ )
lowercase :Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , snake_case__ ) is not None:
lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
self.to(device=kwargs['''device'''] )
lowercase :int = None
lowercase :int = decay
lowercase :Union[str, Any] = min_decay
lowercase :List[Any] = update_after_step
lowercase :Union[str, Any] = use_ema_warmup
lowercase :Any = inv_gamma
lowercase :Any = power
lowercase :str = 0
lowercase :int = None # set in `step()`
lowercase :List[str] = model_cls
lowercase :Any = model_config
@classmethod
def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ )
lowercase :List[Any] = model_cls.from_pretrained(snake_case__ )
lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config )
ema_model.load_state_dict(snake_case__ )
return ema_model
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowercase :Dict = self.model_cls.from_config(self.model_config )
lowercase :Tuple = self.state_dict()
state_dict.pop('''shadow_params''' , snake_case__ )
model.register_to_config(**snake_case__ )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case__ )
def __snake_case ( self : int , snake_case__ : int ):
'''simple docstring'''
lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase :Dict = (1 + step) / (1_0 + step)
lowercase :Optional[int] = min(snake_case__ , self.decay )
# make sure decay is not smaller than min_decay
lowercase :Optional[int] = max(snake_case__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Union[str, Any] = parameters.parameters()
lowercase :Optional[Any] = list(snake_case__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase :List[Any] = self.get_decay(self.optimization_step )
lowercase :Optional[Any] = decay
lowercase :List[Any] = 1 - decay
lowercase :List[str] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case__ )
def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :Optional[Any] = list(snake_case__ )
for s_param, param in zip(self.shadow_params , snake_case__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ):
'''simple docstring'''
lowercase :str = [
p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
for p in self.shadow_params
]
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :str = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase :Dict = None
def __snake_case ( self : Union[str, Any] , snake_case__ : dict ):
'''simple docstring'''
lowercase :List[str] = copy.deepcopy(snake_case__ )
lowercase :Any = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowercase :int = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , snake_case__ ):
raise ValueError('''Invalid min_decay''' )
lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , snake_case__ ):
raise ValueError('''Invalid optimization_step''' )
lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , snake_case__ ):
raise ValueError('''Invalid update_after_step''' )
lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case__ ):
raise ValueError('''Invalid use_ema_warmup''' )
lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowercase :Dict = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ )
if shadow_params is not None:
lowercase :List[Any] = shadow_params
if not isinstance(self.shadow_params , snake_case__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
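
# Illustrative sketch (not part of the original file): the warmup branch in
# `get_decay` above implements decay(step) = 1 - (1 + step / inv_gamma) ** -power,
# clamped to [min_decay, decay]. A hypothetical standalone helper, assuming the
# diffusers-style defaults (inv_gamma=1.0, power=2/3, decay=0.9999):
def ema_decay_schedule(step, inv_gamma=1.0, power=2 / 3, min_decay=0.0, max_decay=0.9999):
    if step <= 0:
        return 0.0
    value = 1 - (1 + step / inv_gamma) ** -power
    return max(min(value, max_decay), min_decay)

# The decay ramps up from 0 toward max_decay as training progresses, e.g.
# ema_decay_schedule(1) ~= 0.37, ema_decay_schedule(100) ~= 0.954, ema_decay_schedule(10_000) ~= 0.998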
| 677 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE =['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE =dict(zip(snake_case ,range(len(snake_case ) ) ) )
SCREAMING_SNAKE_CASE =['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
SCREAMING_SNAKE_CASE ={'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case ) )
SCREAMING_SNAKE_CASE ={
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname ,snake_case )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(snake_case ,snake_case )
def _lowerCAmelCase ( self : Any ,**snake_case : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case )
def _lowerCAmelCase ( self : List[Any] ,**snake_case : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _lowerCAmelCase ( self : Optional[int] ,**snake_case : List[str] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**snake_case )
def _lowerCAmelCase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =[np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=snake_case )
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,snake_case )
self.assertIsInstance(processor_fast.tokenizer ,snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,snake_case )
self.assertIsInstance(processor_fast.image_processor ,snake_case )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=snake_case ,padding_value=1.0 )
SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,snake_case )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE =image_processor(snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =processor(images=snake_case ,return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
SCREAMING_SNAKE_CASE ='lower newer'
SCREAMING_SNAKE_CASE =processor(text=snake_case )
SCREAMING_SNAKE_CASE =tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
SCREAMING_SNAKE_CASE ='lower newer'
SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE =processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE =processor.batch_decode(snake_case )
SCREAMING_SNAKE_CASE =tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case ,snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.get_image_processor()
SCREAMING_SNAKE_CASE =self.get_tokenizer()
SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
SCREAMING_SNAKE_CASE ='lower newer'
SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
SCREAMING_SNAKE_CASE =processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
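
# Illustrative usage sketch (not part of the test file above), assuming network access
# and the public "openai/clip-vit-base-patch32" checkpoint; any CLIP checkpoint works:
def _example_clip_processor_usage():
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
    inputs = processor(text=['lower newer'], images=image, return_tensors='pt')
    return sorted(inputs.keys())  # expected: ['attention_mask', 'input_ids', 'pixel_values']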
| 252 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class a_ :
"""simple docstring"""
def __init__( self : Tuple ,snake_case : List[str] ,snake_case : List[str]=13 ,snake_case : Optional[Any]=7 ,snake_case : Union[str, Any]=False ,snake_case : str=True ,snake_case : Tuple=False ,snake_case : List[Any]=True ,snake_case : Tuple=33 ,snake_case : Dict=32 ,snake_case : str=5 ,snake_case : str=4 ,snake_case : int=37 ,snake_case : int="gelu" ,snake_case : int=0.1 ,snake_case : Dict=0.1 ,snake_case : int=512 ,snake_case : Optional[Any]=16 ,snake_case : List[Any]=2 ,snake_case : Tuple=0.02 ,snake_case : int=3 ,snake_case : Tuple=4 ,snake_case : List[str]=None ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : str ):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Union[str, Any] ,snake_case : Tuple ,snake_case : List[Any] ,snake_case : List[str] ,snake_case : str ):
SCREAMING_SNAKE_CASE =EsmModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : int ,snake_case : str ,snake_case : Tuple ,snake_case : List[str] ,snake_case : Any ,snake_case : Any ):
SCREAMING_SNAKE_CASE =EsmForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : str ,snake_case : str ,snake_case : Optional[Any] ,snake_case : Any ,snake_case : List[Any] ,snake_case : Dict ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =EsmForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = False
__UpperCAmelCase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = ()
__UpperCAmelCase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =EsmModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =EsmModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE =torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE =create_position_ids_from_input_ids(snake_case ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.empty(2 ,4 ,30 )
SCREAMING_SNAKE_CASE =[
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE =torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE =embeddings.create_position_ids_from_inputs_embeds(snake_case )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCAmelCase ( self : Optional[int] ):
pass
@require_torch
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Optional[int] ):
with torch.no_grad():
SCREAMING_SNAKE_CASE =EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
SCREAMING_SNAKE_CASE =torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
SCREAMING_SNAKE_CASE =33
SCREAMING_SNAKE_CASE =torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
with torch.no_grad():
SCREAMING_SNAKE_CASE =EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
SCREAMING_SNAKE_CASE =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE =model(snake_case )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE =torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,snake_case ,atol=1e-4 ) )
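
# Illustrative sketch (not part of the test file): a pure-Python reference for the
# position-id convention checked above -- non-padding tokens are numbered from
# padding_idx + 1 onward, while padding positions keep padding_idx itself:
def reference_position_ids(input_ids, padding_idx):
    position_ids, count = [], 0
    for token in input_ids:
        if token == padding_idx:
            position_ids.append(padding_idx)
        else:
            count += 1
            position_ids.append(padding_idx + count)
    return position_ids

assert reference_position_ids([12, 31, 13, 1], padding_idx=1) == [2, 3, 4, 1]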
| 252 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__a = (7_2_0, 1_2_8_0) # Height, Width
__a = (0.4, 0.6) # if a box's height or width is lower than this scale, drop it.
__a = 1 / 1_0_0
__a = ''
__a = ''
__a = ''
__a = 2_5_0
def a ( ):
'''simple docstring'''
lowercase_ , lowercase_ = get_dataset(snake_case__ , snake_case__ )
for index in range(snake_case__ ):
lowercase_ = random.sample(range(len(snake_case__ ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ = random_chars(32 )
lowercase_ = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ = []
for anno in new_annos:
lowercase_ = anno[3] - anno[1]
lowercase_ = anno[4] - anno[2]
lowercase_ = anno[1] + width / 2
lowercase_ = anno[2] + height / 2
lowercase_ = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(snake_case__ )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( snake_case__: str , snake_case__: str ):
'''simple docstring'''
lowercase_ = []
lowercase_ = []
for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ):
lowercase_ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(snake_case__ ) as in_file:
lowercase_ = in_file.readlines()
lowercase_ = os.path.join(snake_case__ , F'''{label_name}.jpg''' )
lowercase_ = []
for obj_list in obj_lists:
lowercase_ = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def a ( snake_case__: list , snake_case__: list , snake_case__: list[int] , snake_case__: tuple[int, int] , snake_case__: tuple[float, float] , snake_case__: float = 0.0 , ):
'''simple docstring'''
lowercase_ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ = int(scale_x * output_size[1] )
lowercase_ = int(scale_y * output_size[0] )
lowercase_ = []
lowercase_ = []
for i, index in enumerate(snake_case__ ):
lowercase_ = all_img_list[index]
path_list.append(snake_case__ )
lowercase_ = all_annos[index]
lowercase_ = cva.imread(snake_case__ )
if i == 0: # top-left
lowercase_ = cva.resize(snake_case__ , (divid_point_x, divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = bbox[1] * scale_x
lowercase_ = bbox[2] * scale_y
lowercase_ = bbox[3] * scale_x
lowercase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = scale_x + bbox[1] * (1 - scale_x)
lowercase_ = bbox[2] * scale_y
lowercase_ = scale_x + bbox[3] * (1 - scale_x)
lowercase_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = bbox[1] * scale_x
lowercase_ = scale_y + bbox[2] * (1 - scale_y)
lowercase_ = bbox[3] * scale_x
lowercase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ = img
for bbox in img_annos:
lowercase_ = scale_x + bbox[1] * (1 - scale_x)
lowercase_ = scale_y + bbox[2] * (1 - scale_y)
lowercase_ = scale_x + bbox[3] * (1 - scale_x)
lowercase_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowercase_ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def a ( snake_case__: int ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
lowercase_ = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
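
# Illustrative sketch (not part of the original script): the two coordinate
# conversions used above. YOLO labels store (class, x_center, y_center, width,
# height) in relative coordinates; the script round-trips them via corner form:
def yolo_to_corners(xc, yc, w, h):
    return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

def corners_to_yolo(xmin, ymin, xmax, ymax):
    return (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin

# Exact binary fractions keep the round trip bit-exact:
assert yolo_to_corners(0.5, 0.5, 0.25, 0.5) == (0.375, 0.25, 0.625, 0.75)
assert corners_to_yolo(0.375, 0.25, 0.625, 0.75) == (0.5, 0.5, 0.25, 0.5)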
| 97 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Optional[int] = 'wav2vec2'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : Dict=1e-5 , SCREAMING_SNAKE_CASE_ : List[Any]="group" , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE_ : Any=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE_ : str=1_6 , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : str=0.05 , SCREAMING_SNAKE_CASE_ : Dict=1_0 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]=0 , SCREAMING_SNAKE_CASE_ : Dict=3_2_0 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0_0 , SCREAMING_SNAKE_CASE_ : List[Any]=2_5_6 , SCREAMING_SNAKE_CASE_ : int=2_5_6 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : List[str]="sum" , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=2_5_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , SCREAMING_SNAKE_CASE_ : List[Any]=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : List[Any]=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size
lowercase_ = feat_extract_norm
lowercase_ = feat_extract_activation
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = conv_bias
lowercase_ = num_conv_pos_embeddings
lowercase_ = num_conv_pos_embedding_groups
lowercase_ = len(self.conv_dim )
lowercase_ = num_hidden_layers
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = num_attention_heads
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = feat_proj_dropout
lowercase_ = final_dropout
lowercase_ = layerdrop
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = vocab_size
lowercase_ = do_stable_layer_norm
lowercase_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ = apply_spec_augment
lowercase_ = mask_time_prob
lowercase_ = mask_time_length
lowercase_ = mask_time_min_masks
lowercase_ = mask_feature_prob
lowercase_ = mask_feature_length
lowercase_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase_ = num_codevectors_per_group
lowercase_ = num_codevector_groups
lowercase_ = contrastive_logits_temperature
lowercase_ = feat_quantizer_dropout
lowercase_ = num_negatives
lowercase_ = codevector_dim
lowercase_ = proj_codevector_dim
lowercase_ = diversity_loss_weight
# ctc loss
lowercase_ = ctc_loss_reduction
lowercase_ = ctc_zero_infinity
# adapter
lowercase_ = add_adapter
lowercase_ = adapter_kernel_size
lowercase_ = adapter_stride
lowercase_ = num_adapter_layers
lowercase_ = output_hidden_size or hidden_size
lowercase_ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = xvector_output_dim
@property
def _lowercase ( self : Any ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
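
# Illustrative check (not part of the original file): the property above multiplies the
# feature-extractor strides, i.e. how many raw audio samples collapse into one encoder
# frame. With the default strides that is 5 * 2**6 == 320, so at a 16 kHz sampling rate
# the model emits one hidden state every 320 / 16_000 s = 20 ms.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320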
| 97 | 1 |
lowerCamelCase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase_ ( __UpperCamelCase ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ ="""""".join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data )
SCREAMING_SNAKE_CASE__ =len(__UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
SCREAMING_SNAKE_CASE__ =B"""=""" * ((6 - len(__UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6)
else:
SCREAMING_SNAKE_CASE__ =B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(__UpperCamelCase ), 6 ) ).encode()
+ padding
)
def UpperCAmelCase_ ( __UpperCamelCase ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__UpperCamelCase, __UpperCamelCase ) and not isinstance(__UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =(
"""argument should be a bytes-like object or ASCII string, """
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(__UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__UpperCamelCase, __UpperCamelCase ):
try:
SCREAMING_SNAKE_CASE__ =encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
SCREAMING_SNAKE_CASE__ =encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
SCREAMING_SNAKE_CASE__ =encoded_data[:-padding]
SCREAMING_SNAKE_CASE__ ="""""".join(
bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
SCREAMING_SNAKE_CASE__ ="""""".join(
bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
SCREAMING_SNAKE_CASE__ =[
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(__UpperCamelCase ), 8 )
]
return bytes(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
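
# Illustrative worked example (not part of the original file): encoding b"Man".
# Bytes 77, 97, 110 -> bits 01001101 01100001 01101110 -> 6-bit groups
# 010011 010110 000101 101110 -> charset indices 19, 22, 5, 46 -> "TWFu".
assert base64_encode(b"Man") == b"TWFu"
assert base64_decode("TWFu") == b"Man"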
| 588 |
import math
def sieve(n: int) -> list:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Smallest multiple of `each` inside the current [low, high] segment
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
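
# Illustrative cross-check (not part of the original file): a plain Sieve of
# Eratosthenes agrees with the segmented version above on small inputs.
def simple_sieve(n: int) -> list:
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(math.sqrt(n)) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]

assert sieve(100) == simple_sieve(100)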
| 588 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCAmelCase: Dict = trt.Logger(trt.Logger.WARNING)
_lowerCAmelCase: Union[str, Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCAmelCase: Optional[Any] = logging.getLogger(__name__)
_lowerCAmelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
_lowerCAmelCase: Dict = parser.parse_args()
if args.tokenizer_name:
_lowerCAmelCase: Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
_lowerCAmelCase: str = args.per_device_eval_batch_size
_lowerCAmelCase: Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCAmelCase: List[str] = True
_lowerCAmelCase: Tuple = """temp_engine/bert-fp32.engine"""
if args.fpaa:
_lowerCAmelCase: Optional[Any] = """temp_engine/bert-fp16.engine"""
if args.inta:
_lowerCAmelCase: str = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
_lowerCAmelCase: Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCAmelCase: str = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCAmelCase: Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_lowerCAmelCase: int = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_lowerCAmelCase: int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCAmelCase: Optional[int] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowercase( __a : Any , __a : str , __a : Any , __a : Tuple , __a : int , __a : Union[str, Any] , __a : Optional[int] , __a : str ):
a__ =np.asarray(inputs['input_ids'] , dtype=np.intaa )
a__ =np.asarray(inputs['attention_mask'] , dtype=np.intaa )
a__ =np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __a )
# start time
a__ =time.time()
# Run inference
context.execute_async(
bindings=[int(__a ) for d_inp in d_inputs] + [int(__a ), int(__a )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__a , __a , __a )
cuda.memcpy_dtoh_async(__a , __a , __a )
# Synchronize the stream and take time
stream.synchronize()
# end time
a__ =time.time()
a__ =end_time - start_time
a__ =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
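
# Illustrative note (not part of the original script): the helper above follows the
# classic TensorRT pattern -- asynchronous host-to-device copies of the three input
# arrays, `execute_async` on the same stream, asynchronous device-to-host copies of
# the two logits buffers, and a final `stream.synchronize()` so the host-side timings
# bracket the real device work. Assuming the default (batch=8, seq=384) shape and
# float32 logits, each output buffer is 8 * 384 * 4 = 12_288 bytes.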
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_lowerCAmelCase: Any = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase: Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_lowerCAmelCase: Dict = raw_datasets["""validation"""].column_names
_lowerCAmelCase: List[Any] = """question""" if """question""" in column_names else column_names[0]
_lowerCAmelCase: int = """context""" if """context""" in column_names else column_names[1]
_lowerCAmelCase: Any = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCAmelCase: Any = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
_lowerCAmelCase: Optional[Any] = min(args.max_seq_length, tokenizer.model_max_length)
def _lowercase( __a : List[Any] ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
a__ =[q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
a__ =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__a , stride=args.doc_stride , return_overflowing_tokens=__a , return_offsets_mapping=__a , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
a__ =tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
a__ =[]
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
a__ =tokenized_examples.sequence_ids(__a )
a__ =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
a__ =sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
a__ =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
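
# Illustrative note (not part of the original script): `stride=args.doc_stride` above is
# the token overlap between consecutive windows of a long context -- with the default
# doc_stride=128 each new feature re-includes the last 128 context tokens of the previous
# window, so an answer span falling on a window boundary still appears whole in one feature.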
_lowerCAmelCase: int = raw_datasets["""validation"""]
# Validation Feature Creation
_lowerCAmelCase: List[str] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
_lowerCAmelCase: Any = default_data_collator
_lowerCAmelCase: Any = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
_lowerCAmelCase: Dict = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowercase( __a : List[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
a__ =postprocess_qa_predictions(
examples=__a , features=__a , predictions=__a , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__a , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
a__ =[
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
a__ =[{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
a__ =[{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__a , label_ids=__a )
_lowerCAmelCase: List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowercase( __a : Optional[Any] ):
return trt.volume(engine.get_binding_shape(__a ) ) * engine.get_binding_dtype(__a ).itemsize
# Allocate device memory for inputs and outputs.
_lowerCAmelCase: Any = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
_lowerCAmelCase: Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
_lowerCAmelCase: List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
_lowerCAmelCase: str = cuda.mem_alloc(h_outputa.nbytes)
_lowerCAmelCase: Dict = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCAmelCase: str = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
_lowerCAmelCase: Optional[int] = 0.0
_lowerCAmelCase: Dict = 0
_lowerCAmelCase: Union[str, Any] = timeit.default_timer()
_lowerCAmelCase: Optional[int] = None
for step, batch in enumerate(eval_dataloader):
_lowerCAmelCase: Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_lowerCAmelCase: Any = outputs
_lowerCAmelCase: Optional[int] = torch.tensor(start_logits)
_lowerCAmelCase: Any = torch.tensor(end_logits)
        # necessary to pad predictions and labels so they can be gathered across processes
_lowerCAmelCase: Dict = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_lowerCAmelCase: Tuple = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_lowerCAmelCase: int = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCAmelCase: str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_lowerCAmelCase: Tuple = nested_truncate(all_preds, len(eval_dataset))
_lowerCAmelCase: int = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
_lowerCAmelCase: Tuple = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCAmelCase: str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 20 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
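
# Illustrative trace (not part of the original file): one outer pass of odd_even_sort
# on [3, 1, 2]:
#   even phase compares index pair (0, 1): 3 > 1 -> [1, 3, 2]
#   odd phase compares index pair (1, 2): 3 > 2 -> [1, 2, 3]
# a second pass performs no swaps, so the loop exits.
assert odd_even_sort([3, 1, 2]) == [1, 2, 3]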
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 683 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end, then recurse."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
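
# Illustrative note (not part of the original file): each recursive call bubbles the
# largest remaining element to the end and then recurses on a prefix one element
# shorter; the `not swapped` early exit makes already-sorted input a single O(n) pass.
assert bubble_sort([5, 1, 4, 2]) == [1, 2, 4, 5]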
| 443 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 443 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase_ = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
lowerCAmelCase_ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
lowerCAmelCase_ = '''zero2'''
lowerCAmelCase_ = '''zero3'''
lowerCAmelCase_ = [ZEROa, ZEROa]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Tuple = parameterized.to_safe_name('''_'''.join(str(_UpperCamelCase ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowerCAmelCase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __lowerCAmelCase ( _a ):
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@require_torch_multi_gpu
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@require_torch_multi_gpu
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = True , __magic_name__ = True , __magic_name__ = True , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = models[model]
snake_case_ : Optional[int] = self.run_trainer(
stage=__magic_name__ , model_name=__magic_name__ , eval_steps=__magic_name__ , num_train_epochs=1 , distributed=__magic_name__ , fpaa=__magic_name__ , )
self.do_checks(__magic_name__ )
return output_dir
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = 1 , __magic_name__ = True , __magic_name__ = True , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_auto_remove_tmp_dir('''./xxx''' , after=__magic_name__ )
snake_case_ : Optional[Any] = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__magic_name__ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ : Tuple = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
snake_case_ : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
snake_case_ : Optional[int] = self.get_launcher(__magic_name__ )
snake_case_ : List[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__magic_name__ , env=self.get_env() )
return output_dir
def lowerCamelCase (self , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
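# Illustrative shape of the full command assembled by run_trainer() above; paths and
# the model name are placeholders, not values taken from this file:
#   deepspeed --num_nodes 1 --num_gpus <n> <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path <model> ... --deepspeed <test_dir>/ds_config_wav2vec2_<stage>.json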
| 60 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare that with taking a full-size model, reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB in total for all files.
# The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 123 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ''''''
_lowerCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase = None # compression type in fsspec. ex: "gzip"
_lowerCamelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Optional[Any] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , **UpperCAmelCase_ : Dict):
super().__init__(self , **UpperCAmelCase_)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase__ : int = fsspec.open(
UpperCAmelCase_ , mode='rb' , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCamelCase__ : Tuple = os.path.basename(self.file.path.split('::')[0])
UpperCamelCase__ : Union[str, Any] = (
self.compressed_name[: self.compressed_name.rindex('.')]
if '.' in self.compressed_name
else self.compressed_name
)
UpperCamelCase__ : str = None
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : Dict):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(UpperCAmelCase_).lstrip('/')
def __UpperCamelCase ( self : Any):
if self.dir_cache is None:
UpperCamelCase__ : Tuple = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
UpperCamelCase__ : Dict = {f['name']: f}
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
return self.file.open().read()
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : str , ):
UpperCamelCase__ : str = self._strip_protocol(UpperCAmelCase_)
if mode != "rb":
raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
return self.file.open()
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''.bz2'''
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''.gz'''
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''.lz4'''
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''.xz'''
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''.zst'''
def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , UpperCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_ : Dict , ):
super().__init__(
fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ : str = self.file.__enter__
class __lowercase :
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict):
UpperCamelCase__ : str = file_
def __enter__( self : str):
self._file.__enter__()
return self
def __exit__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_)
def __iter__( self : Tuple):
return iter(self._file)
def __UpperCamelCase ( self : Any):
return next(self._file)
def __getattr__( self : str , UpperCAmelCase_ : Tuple):
return getattr(self._file , UpperCAmelCase_)
def fixed_enter(*UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_))
UpperCamelCase__ : str = fixed_enter
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def primitive_root( p_val: int ) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3 , p_val)
        if pow(g , 2 , p_val) == 1:
            continue
        if pow(g , p_val , p_val) == 1:
            continue
        return g
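# Note on the loop above: pow(g, p_val, p_val) == g for prime p_val by Fermat's little
# theorem, so the second check can only reject g == 1, which randrange(3, p_val) already
# excludes; a textbook safe-prime test would use pow(g, (p_val - 1) // 2, p_val) instead.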
def generate_key( key_size: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size) # select large prime number.
    e_1 = primitive_root(p) # one primitive root on modulo p.
    d = random.randrange(3 , p) # private key d -> has to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p) , p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
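# Key relation established above: e_2 = modinv(pow(e_1, d, p), p), i.e.
# pow(e_1, d, p) * e_2 % p == 1; (key_size, e_1, e_2, p) is published while d stays private.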
def make_key_files( name: str , key_size: int ) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt' , 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt' , 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')
def main( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 1 |
"""simple docstring"""
import base64
def baseaa_encode( string: str ) -> bytes:
    return base64.b85encode(string.encode('''utf-8''' ) )
def baseaa_decode( a_bytes: bytes ) -> str:
    return base64.b85decode(a_bytes ).decode('''utf-8''' )
if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 103 |
'''simple docstring'''
def solution( UpperCAmelCase_ = 1_0_0_0 ):
    a : int = 3
    result : int = 0
    while a < UpperCAmelCase_:
        # multiples of 15 already satisfy a % 3 == 0, so they are counted exactly once here
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
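# Sanity check (Project Euler #1): solution(1_000) == 233168, the sum of all
# multiples of 3 or 5 below 1000.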
if __name__ == "__main__":
print(F"""{solution() = }""")
| 195 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = LayoutLMTokenizer
_lowerCamelCase = LayoutLMTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCamelCase__ ( self ) -> int:
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase__ ( self ,**lowerCamelCase_ ) -> Union[str, Any]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
        input_text = """UNwant\u00E9d,running"""
        output_text = """unwanted, running"""
        return input_text, output_text
def UpperCamelCase__ ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 1_0, 8, 9] )
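        # With the vocab defined in setUp, those ids decode back to:
        # 7 -> "un", 4 -> "##want", 5 -> "##ed", 10 -> ",", 8 -> "runn", 9 -> "##ing"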
def UpperCamelCase__ ( self ) -> Tuple:
pass
| 255 |
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp( pattern: str , text: str ):
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculate the hash of the pattern and of the first window of the text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Update the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
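# Average-case cost is O(p_len + t_len): the rolling hash updates in O(1) per window,
# and the O(p_len) comparison text[i : i + p_len] == pattern only runs on hash matches.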
def test_rabin_karp( ):
    """simple docstring"""
    # Test 1)
    pattern = """abc1abc12"""
    text_a = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text_b = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 255 | 1 |
def calc_profit( profit: list , weight: list , max_weight: int )-> float:
    """simple docstring"""
    if len(profit ) != len(weight ):
        raise ValueError("The length of profit and weight must be same." )
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit can not be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight can not be negative." )
    # Profit gained per 1 kg of each item: compute and store the
    # profit/weight ratio for every element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # largest not-yet-used profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1  # mark as used so .index() won't pick it again
        # check whether the whole item fits into the remaining
        # capacity (max_weight - limit)
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # The whole item is taken, so the fraction added is
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item is heavier than the remaining capacity, therefore take only
            # the fraction that fits and the proportional share of its profit:
            # (max_weight - limit) / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
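# Worked example: calc_profit([1, 2, 3], [3, 4, 5], 15) -> 6, since all three items fit
# (total weight 12 <= 15) and the full profits 3 + 2 + 1 are collected greedily by ratio.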
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 628 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str ):
def get_masked_lm_array(UpperCamelCase : str ):
A__ = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_array(UpperCamelCase : str ):
A__ = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_layer_array(UpperCamelCase : int , UpperCamelCase : str ):
A__ = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
def get_encoder_attention_layer_array(UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int ):
A__ = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
A__ = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
A__ = array.reshape(UpperCamelCase )
if "kernel" in name:
A__ = array.transpose()
return torch.from_numpy(UpperCamelCase )
print(F"""Loading model based on config from {config_path}...""" )
A__ = BertConfig.from_json_file(UpperCamelCase )
A__ = BertForMaskedLM(UpperCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
A__ = model.bert.encoder.layer[layer_index]
# Self-attention
A__ = layer.attention.self
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
A__ = layer.attention.output
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
A__ = get_encoder_attention_layer_array(
UpperCamelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_attention_layer_norm/beta""" )
# Intermediate
A__ = layer.intermediate
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_intermediate_dense/bias""" )
# Output
A__ = layer.output
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/kernel""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_dense/bias""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/gamma""" )
A__ = get_encoder_layer_array(UpperCamelCase , """_output_layer_norm/beta""" )
# Embeddings
A__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
A__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
A__ = model.cls.predictions.transform
A__ = get_masked_lm_array("""dense/kernel""" )
A__ = get_masked_lm_array("""dense/bias""" )
A__ = get_masked_lm_array("""layer_norm/gamma""" )
A__ = get_masked_lm_array("""layer_norm/beta""" )
A__ = get_masked_lm_array("""embedding_table""" )
# Pooling
A__ = BertPooler(config=UpperCamelCase )
A__ = get_encoder_array("""_pooler_layer/kernel""" )
A__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase )
# Integration test - should load without any errors ;)
A__ = BertForMaskedLM.from_pretrained(UpperCamelCase )
print(new_model.eval() )
    print("""Model conversion was done successfully!""" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowerCamelCase__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 574 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class( config_class ):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("""/""" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints( ):
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 163 |
'''simple docstring'''
def gray_code_sequence( bit_count: int ):
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("""The given input must be positive""" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert the bit strings to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] ,2 )
    return sequence
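# Example: gray_code_sequence(2) -> [0, 1, 3, 2], converted from the bit strings
# ["00", "01", "11", "10"] produced by gray_code_sequence_string(2).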
def gray_code_sequence_string( bit_count: int ):
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = """0""" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = """1""" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=A__ )
class a__ :
A = 42
A = 42
A = None
A = None
A = None
@dataclass(frozen=A__ )
class a__ :
A = 42
A = None
A = None
A = None
A = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class a__ ( A__ ):
A = 42
def __init__( self : Optional[int],_A : str,_A : PreTrainedTokenizer,_A : str,_A : Optional[int] = None,_A : List[Any]=False,_A : bool = False,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",tokenizer.__class__.__name__,str(_A ),_A,),)
SCREAMING_SNAKE_CASE_ : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : str = cached_features_file + ".lock"
with FileLock(_A ):
if os.path.exists(_A ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(_A )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
processor.get_dev_examples(_A ) if evaluate else processor.get_train_examples(_A )
)
logger.info("Training examples: %s",len(_A ) )
SCREAMING_SNAKE_CASE_ : Any = hans_convert_examples_to_features(_A,_A,_A,_A )
logger.info("Saving features into cached file %s",_A )
torch.save(self.features,_A )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[Any],_A : Tuple ):
"""simple docstring"""
return self.features[i]
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class a__ :
A = 42
def __init__( self : Optional[int],_A : str,_A : PreTrainedTokenizer,_A : str,_A : Optional[int] = 128,_A : Union[str, Any]=False,_A : bool = False,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : List[str] = label_list
SCREAMING_SNAKE_CASE_ : Any = processor.get_dev_examples(_A ) if evaluate else processor.get_train_examples(_A )
SCREAMING_SNAKE_CASE_ : Dict = hans_convert_examples_to_features(_A,_A,_A,_A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ),desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(_A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.data.Dataset.from_generator(
_A,(
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
),(
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
),)
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[Any],_A : str ):
"""simple docstring"""
return self.features[i]
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return self.label_list
class a__ ( A__ ):
def __UpperCamelCase ( self : Dict,_A : Optional[int] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_A,"heuristics_train_set.txt" ) ),"train" )
def __UpperCamelCase ( self : Dict,_A : Any ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_A,"heuristics_evaluation_set.txt" ) ),"dev" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def __UpperCamelCase ( self : Union[str, Any],_A : str,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for i, line in enumerate(_A ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : Optional[Any] = "%s-%s" % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = line[5]
SCREAMING_SNAKE_CASE_ : List[Any] = line[6]
SCREAMING_SNAKE_CASE_ : Any = line[7][2:] if line[7].startswith("ex" ) else line[7]
SCREAMING_SNAKE_CASE_ : Tuple = line[0]
examples.append(InputExample(guid=_A,text_a=_A,text_b=_A,label=_A,pairID=_A ) )
return examples
def _snake_case ( lowerCAmelCase : List[InputExample] , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : PreTrainedTokenizer , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {label: i for i, label in enumerate(lowerCAmelCase )}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCAmelCase ) , desc="convert examples to features" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d" % (ex_index) )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCAmelCase , max_length=lowerCAmelCase , padding="max_length" , truncation=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : int = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**lowerCAmelCase , label=lowerCAmelCase , pairID=lowerCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
__lowerCamelCase : Tuple = {
'''hans''': 3,
}
__lowerCamelCase : Union[str, Any] = {
'''hans''': HansProcessor,
}
| 216 |
from __future__ import annotations
def _snake_case ( lowerCAmelCase : int | float | str , lowerCAmelCase : int | float | str ):
"""simple docstring"""
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE_ : Tuple = int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : list[str] = []
for temp in range(int(lowerCAmelCase ) ):
series.append(f'1 / {pow(temp + 1 , int(lowerCAmelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : List[Any] = int(input('''Enter the last number (nth term) of the P-Series'''))
__lowerCamelCase : int = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 216 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 1.5
_lowerCamelCase : List[str] = int(factor * num_class_images )
_lowerCamelCase : Optional[Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=_lowercase )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_lowerCamelCase : List[Any] = client.query(text=_lowercase )
if len(_lowercase ) >= factor * num_class_images or num_images > 1E4:
break
else:
_lowerCamelCase : Optional[int] = int(factor * num_images )
_lowerCamelCase : str = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowercase , aesthetic_weight=0.1 , )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : str = 0
_lowerCamelCase : Any = tqdm(desc="downloading real regularization images" , total=_lowercase )
with open(F'{class_data_dir}/caption.txt' , "w" ) as fa, open(F'{class_data_dir}/urls.txt' , "w" ) as fa, open(
F'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_lowerCamelCase : Optional[Any] = class_images[count]
count += 1
try:
_lowerCamelCase : str = requests.get(images["url"] )
if img.status_code == 200:
_lowerCamelCase : int = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = argparse.ArgumentParser("" , add_help=_lowercase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowercase , type=_lowercase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowercase , type=_lowercase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowercase )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
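# Example of the renaming above (illustrative key, not read from an actual checkpoint):
# "img_encoder.layers.0.blocks.1.norm1.weight"
#   -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"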
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 11 | 0 |
from sklearn.metrics import fa_score
import datasets
__a = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
__a = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
__a = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : List[Any] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Tuple="binary" , SCREAMING_SNAKE_CASE_ : Any=None ) -> Any:
lowercase_ = fa_score(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , pos_label=SCREAMING_SNAKE_CASE_ , average=SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ )
return {"f1": float(SCREAMING_SNAKE_CASE_ ) if score.size == 1 else score}
| 97 |
from __future__ import annotations
def ceil_index( v , l , r , key ):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length( v: list[int] ):
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            # place v[i] at the ceiling position found by binary search
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
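# Example: longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) -> 6
# (one longest increasing subsequence is [2, 3, 7, 8, 10, 13]).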
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
__A =['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__A =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A =get_tests_dir("fixtures")
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = mock.Mock()
__UpperCAmelCase : int = 5_00
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : List[Any] = HTTPError
__UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head:
__UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case__ ( cls : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(a_ )
@classmethod
def snake_case__ ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
a_ , repo_id='''test-feature-extractor''' , push_to_hub=a_ , use_auth_token=self._token )
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
a_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=a_ , use_auth_token=self._token )
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def snake_case__ ( self : List[str] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCAmelCase : List[Any] = CustomFeatureExtractor.from_pretrained(a_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
__UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 241 | 0 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowerCamelCase ( a_ ):
_lowerCamelCase :torch.FloatTensor
_lowerCamelCase :Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
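

# A minimal sanity check of the schedule above (kept as a comment to avoid
# import-time side effects): a 1000-step cosine schedule yields one beta per
# step, each capped at max_beta.
# betas = betas_for_alpha_bar(1000)
# assert betas.shape == (1000,) and float(betas.max()) <= 0.999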
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # The inverse DDIM scheduler does not rescale the model input.
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
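        # e.g. with num_train_timesteps=1000, set_timesteps(50) produces
        # [0, 20, 40, ..., 980] (shifted by `steps_offset`), ascending because
        # inversion walks from the clean sample towards the noisy one.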
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
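

# A minimal inversion sketch (hypothetical `unet` and `latents`, neither defined
# in this module) showing the intended call order for the scheduler above:
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample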
| 299 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("""\n********Press N to stop entering at any point of time********\n""")
    check = input("""Enter the value of the root node: """).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("build_tree ended without the user entering 'n'")  # unreachable in practice
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=""",""")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=""",""")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=""",""")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=""",""")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # drain every node of the current level before moving on
            node_dequeued = q.get()
            print(node_dequeued.data, end=""",""")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=""",""")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=""",""")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # popping from stack2 yields the post order
        print(stack2.pop().data, end=""",""")
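

# Worked example of the two-stack trick: for a root 1 with left child 2 and
# right child 3, stack2 fills as [1, 3, 2] and popping it prints 2,3,1 — the
# post-order sequence.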
def lowercase_ ( __UpperCAmelCase = "" , __UpperCAmelCase=50 , __UpperCAmelCase="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = divmod(width - len(__UpperCAmelCase ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 299 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}

ZERO2 = """zero2"""
ZERO3 = """zero3"""
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
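# With the test methods below, the generated sub-test names look like
# test_fp32_non_distributed_zero2_base or test_fp16_distributed_zero3_robust.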
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage: str, model: str):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage: str, model: str):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage: str, model: str):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage: str, model: str):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr doesn't currently save any results, so there is nothing to
        # check here beyond the training process exiting cleanly
        pass
    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fp16:
            args.extend(['--fp16'])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        script = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed: bool = False):
        # use up to 2 gpus for the distributed tests, otherwise a single gpu
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 703 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')

    state_dict = chkpt['model']

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v

    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
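    # e.g. a BPE continuation token {"hel@@": 40} becomes {"hel": 40}, while a
    # word-final token {"world": 41} becomes {"world</w>": 41}; ids 0-13
    # (presumably XLM's special tokens) are left untouched.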
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']

    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')

    print(f"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 534 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : Dict , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : List[str] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : List[str] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : Any , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase : Any , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : Any , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> List[Any]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> List[Any]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[int]:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
def UpperCamelCase__ ( *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Any:
'''simple docstring'''
requires_backends(UpperCAmelCase_ , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : int , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Dict , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : int , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : Dict , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : Any , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : Any , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : str , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Optional[int] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[int] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : int , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : int , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *UpperCamelCase : str , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Dict ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : int , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : str , *UpperCamelCase : List[str] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : Dict , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : Any , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : int , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : Any , *UpperCamelCase : str , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *UpperCamelCase : str , **UpperCamelCase : int ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : str , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *UpperCamelCase : str , **UpperCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase__ ( metaclass=A_ ):
'''simple docstring'''
UpperCAmelCase_ = ['''torch''']
def __init__( self : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : Any ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] ) | 322 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for content stored in a Spark DataFrame."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id'''))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('''*''').where(F'part_id = {partition_id}').drop('''part_id''')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
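

# e.g. with partition_order=[1, 0], the returned generator yields keys
# "1_0", "1_1", ... for every row of partition 1, then "0_0", "0_1", ...
# for partition 0, each paired with the row as a dict.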
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
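
    # A minimal usage sketch (assuming an active SparkSession `spark`; the
    # methods below are the standard datasets.DatasetBuilder interface):
    #
    #     df = spark.range(100)
    #     builder = Spark(df, cache_dir="/nfs/shared/cache")
    #     builder.download_and_prepare()
    #     ds = builder.as_dataset()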
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, '''fs_test''' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, '''a''')
            return [probe_file]

        if self._spark.conf.get('''spark.master''', '''''').startswith('''local'''):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''')
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 1_00 else 1_00
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, '''batch_bytes: long''')
            .agg(pyspark.sql.functions.sum('''batch_bytes''').alias('''sample_bytes'''))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
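        # e.g. an estimated 1 GB dataset with max_shard_size of 500 MB is
        # repartitioned into int(1e9 / 5e8) = 2 partitions (never more
        # partitions than there are rows).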
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('''SSSSS''', F'{shard_id:05d}').replace('''TTTTT''', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, '''task_id: long, num_examples: long, num_bytes: long''')
            .groupBy('''task_id''')
            .agg(
                pyspark.sql.functions.sum('''num_examples''').alias('''total_num_examples'''), pyspark.sql.functions.sum('''num_bytes''').alias('''total_num_bytes'''), pyspark.sql.functions.count('''num_bytes''').alias('''num_shards'''), pyspark.sql.functions.collect_list('''num_examples''').alias('''shard_lengths'''), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase_ ( self : Dict , UpperCamelCase : "datasets.SplitGenerator" , UpperCamelCase : str = "arrow" , UpperCamelCase : Optional[Union[str, int]] = None , UpperCamelCase : Optional[int] = None , **UpperCamelCase : Union[str, Any] , ):
"""simple docstring"""
self._validate_cache_dir()
_lowercase : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase )
_lowercase : Optional[Any] = not is_remote_filesystem(self._fs )
_lowercase : List[Any] = os.path.join if is_local else posixpath.join
_lowercase : Optional[int] = '''-TTTTT-SSSSS-of-NNNNN'''
_lowercase : str = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
_lowercase : List[str] = path_join(self._output_dir , UpperCamelCase )
_lowercase : Optional[Any] = 0
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : int = []
_lowercase : Union[str, Any] = []
for task_id, content in self._prepare_split_single(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCamelCase )
_lowercase : Optional[Any] = total_num_examples
_lowercase : int = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , ):
rename(
UpperCamelCase , fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace('''TTTTT-SSSSS''' , F'{global_shard_id:05d}' ).replace('''NNNNN''' , F'{total_shards:05d}' ) , )
_lowercase : List[Any] = []
_lowercase : int = 0
for i in range(len(UpperCamelCase ) ):
_lowercase , _lowercase : Optional[Any] = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase , len(UpperCamelCase ) ).map(lambda UpperCamelCase : _rename_shard(*UpperCamelCase ) ).collect()
else:
# don't use any pattern
_lowercase : Optional[int] = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace(UpperCamelCase , '''''' ) , )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df ) | 322 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( snake_case):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1, len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
__snake_case = grid[0]
for row_n in range(1, len(snake_case)):
__snake_case = grid[row_n]
__snake_case = fill_row(snake_case, snake_case)
__snake_case = grid[row_n]
return grid[-1][-1]
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
current_row[0] += row_above[0]
for cell_n in range(1, len(snake_case)):
current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase : Dict = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure) | 93 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowercase_ :
def __init__( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]=99 , __lowerCamelCase : Any=32 , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : int=16 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[Any]=0.0_2 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Union[str, Any]=None , ):
snake_case__ : Any = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Any = seq_length
snake_case__ : Dict = is_training
snake_case__ : List[str] = use_input_mask
snake_case__ : Any = use_token_type_ids
snake_case__ : Optional[int] = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : str = num_labels
snake_case__ : Optional[int] = num_choices
snake_case__ : int = scope
def _lowerCAmelCase ( self : int ):
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Optional[int] = None
if self.use_input_mask:
snake_case__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Any = None
snake_case__ : List[str] = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Tuple ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , use_stable_embedding=__lowerCamelCase , )
def _lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
snake_case__ : int = OpenLlamaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : str = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
snake_case__ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , ):
snake_case__ : List[Any] = True
snake_case__ : Tuple = OpenLlamaModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
snake_case__ : Optional[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
snake_case__ : Dict = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , ):
snake_case__ : List[str] = OpenLlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , ):
snake_case__ : List[str] = True
snake_case__ : Tuple = True
snake_case__ : str = OpenLlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
snake_case__ : Tuple = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
snake_case__ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : int = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['hidden_states'][0]
snake_case__ : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )['hidden_states'][0]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : str ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : List[Any] = config_and_inputs
snake_case__ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
A_ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
A_ = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Any = OpenLlamaModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCAmelCase ( self : List[str] ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = 3
snake_case__ : List[str] = input_dict['input_ids']
snake_case__ : Tuple = input_ids.ne(1 ).to(__lowerCamelCase )
snake_case__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : Tuple = OpenLlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self : Dict ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = 3
snake_case__ : List[Any] = 'single_label_classification'
snake_case__ : int = input_dict['input_ids']
snake_case__ : Union[str, Any] = input_ids.ne(1 ).to(__lowerCamelCase )
snake_case__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : Tuple = OpenLlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : str = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Any = 3
snake_case__ : str = 'multi_label_classification'
snake_case__ : int = input_dict['input_ids']
snake_case__ : Union[str, Any] = input_ids.ne(1 ).to(__lowerCamelCase )
snake_case__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : str = OpenLlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Any ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
snake_case__ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : int = OpenLlamaModel(__lowerCamelCase )
original_model.to(__lowerCamelCase )
original_model.eval()
snake_case__ : Optional[Any] = original_model(__lowerCamelCase ).last_hidden_state
snake_case__ : Union[str, Any] = original_model(__lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : int = {'type': scaling_type, 'factor': 1_0.0}
snake_case__ : Optional[int] = OpenLlamaModel(__lowerCamelCase )
scaled_model.to(__lowerCamelCase )
scaled_model.eval()
snake_case__ : List[Any] = scaled_model(__lowerCamelCase ).last_hidden_state
snake_case__ : Tuple = scaled_model(__lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-5 ) )
| 270 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A_ = logging.get_logger(__name__)
class lowercase_ ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , **__lowerCamelCase : Optional[int] ):
snake_case__ : int = feature_size
snake_case__ : List[str] = sampling_rate
snake_case__ : Any = padding_value
snake_case__ : Union[str, Any] = kwargs.pop('padding_side' , 'right' )
snake_case__ : List[Any] = kwargs.pop('return_attention_mask' , __lowerCamelCase )
super().__init__(**__lowerCamelCase )
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
snake_case__ : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
snake_case__ : Tuple = processed_features[self.model_input_names[0]]
snake_case__ : Union[str, Any] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__lowerCamelCase ) == 0:
if return_attention_mask:
snake_case__ : str = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
snake_case__ : Dict = required_input[0]
if isinstance(__lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
snake_case__ : List[Any] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__lowerCamelCase ):
snake_case__ : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__lowerCamelCase ):
snake_case__ : int = 'tf'
elif is_torch_tensor(__lowerCamelCase ):
snake_case__ : str = 'pt'
elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
snake_case__ : Any = 'np'
else:
raise ValueError(
F"type of {first_element} unknown: {type(__lowerCamelCase )}. "
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
snake_case__ : List[Any] = to_numpy(__lowerCamelCase )
else:
snake_case__ : List[str] = [to_numpy(__lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
snake_case__ : str = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase )
snake_case__ : List[Any] = processed_features[self.model_input_names[0]]
snake_case__ : Tuple = len(__lowerCamelCase )
if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
snake_case__ : str = []
for i in range(__lowerCamelCase ):
snake_case__ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
snake_case__ : Optional[Any] = self._truncate(
__lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , )
truncated_inputs.append(__lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
snake_case__ : Any = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
snake_case__ : Union[str, Any] = PaddingStrategy.MAX_LENGTH
snake_case__ : List[Any] = {}
for i in range(__lowerCamelCase ):
# padding
snake_case__ : Tuple = self._pad(
truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
snake_case__ : Union[str, Any] = []
if value.dtype is np.dtype(np.floataa ):
snake_case__ : int = value.astype(np.floataa )
batch_outputs[key].append(__lowerCamelCase )
return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase )
def _lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
snake_case__ : Optional[int] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
snake_case__ : Tuple = len(__lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case__ : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case__ : List[str] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
snake_case__ : Optional[int] = np.ones(len(__lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
snake_case__ : List[Any] = max_length - len(__lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
snake_case__ : List[str] = np.pad(
processed_features['attention_mask'] , (0, difference) )
snake_case__ : List[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
snake_case__ : int = np.pad(
__lowerCamelCase , __lowerCamelCase , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
snake_case__ : str = np.pad(
processed_features['attention_mask'] , (difference, 0) )
snake_case__ : int = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
snake_case__ : int = np.pad(
__lowerCamelCase , __lowerCamelCase , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def _lowerCAmelCase ( self : int , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
snake_case__ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
snake_case__ : List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
snake_case__ : str = len(__lowerCamelCase ) > max_length
if needs_to_be_truncated:
snake_case__ : Optional[int] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
snake_case__ : List[Any] = processed_features['attention_mask'][:max_length]
return processed_features
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[Any]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
snake_case__ : int = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__lowerCamelCase , __lowerCamelCase ):
snake_case__ : str = PaddingStrategy(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
snake_case__ : str = padding
else:
snake_case__ : Union[str, Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
| 270 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( lowercase: Union[str, Any] ) -> list[int]: # This function is recursive
'''simple docstring'''
_UpperCamelCase: int = len(lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
_UpperCamelCase: str = array[0]
_UpperCamelCase: Optional[Any] = False
_UpperCamelCase: Any = 1
_UpperCamelCase: list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
_UpperCamelCase: Union[str, Any] = True
_UpperCamelCase: List[str] = [element for element in array[i:] if element >= array[i]]
_UpperCamelCase: Union[str, Any] = longest_subsequence(lowercase )
if len(lowercase ) > len(lowercase ):
_UpperCamelCase: List[str] = temp_array
else:
i += 1
_UpperCamelCase: List[str] = [element for element in array[1:] if element >= pivot]
_UpperCamelCase: List[str] = [pivot, *longest_subsequence(lowercase )]
if len(lowercase ) > len(lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod() | 719 | from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCAmelCase_ ( lowercase: Optional[Any] , lowercase: Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase: Union[str, Any] = []
for part_id in partition_order:
_UpperCamelCase: int = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(lowercase ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: Optional[int] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: int = spark.range(100 ).repartition(1 )
_UpperCamelCase: int = Spark(lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: List[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: Optional[Any] = spark.range(10 ).repartition(2 )
_UpperCamelCase: int = [1, 0]
_UpperCamelCase: Any = _generate_iterable_examples(lowercase , lowercase ) # Reverse the partitions.
_UpperCamelCase: List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_UpperCamelCase , _UpperCamelCase: List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
_UpperCamelCase: Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: Any = spark.range(10 ).repartition(1 )
_UpperCamelCase: int = SparkExamplesIterable(lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase: Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
_UpperCamelCase: Union[str, Any] = lambda lowercase : x.reverse()
_UpperCamelCase: List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [2, 1, 0] )
_UpperCamelCase: Union[str, Any] = SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCamelCase , _UpperCamelCase: List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase: str = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_UpperCamelCase: List[Any] = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCamelCase: Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCamelCase , _UpperCamelCase: Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_UpperCamelCase: str = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCamelCase: str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCamelCase , _UpperCamelCase: str = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase: Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
_UpperCamelCase: Union[str, Any] = spark.range(100 ).repartition(1 )
_UpperCamelCase: Optional[Any] = Spark(lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100 | 264 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
_a : int = ['''pixel_values''']
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ = IMAGENET_DEFAULT_STD , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
a_ = size if size is not None else {'shortest_edge': 224}
a_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
a_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
a_ = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
a_ = do_resize
a_ = size
a_ = resample
a_ = do_center_crop
a_ = crop_size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a_ = int((256 / 224) * size['shortest_edge'] )
a_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
a_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
UpperCamelCase__ , size=(size_dict['height'], size_dict['width']) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = do_resize if do_resize is not None else self.do_resize
a_ = resample if resample is not None else self.resample
a_ = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ = do_rescale if do_rescale is not None else self.do_rescale
a_ = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ = do_normalize if do_normalize is not None else self.do_normalize
a_ = image_mean if image_mean is not None else self.image_mean
a_ = image_std if image_std is not None else self.image_std
a_ = size if size is not None else self.size
a_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
a_ = crop_size if crop_size is not None else self.crop_size
a_ = get_size_dict(UpperCamelCase__ , param_name='crop_size' )
a_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
a_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
a_ = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_center_crop:
a_ = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_rescale:
a_ = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_normalize:
a_ = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
a_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
a_ = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 536 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
_a : List[Any] = ['''image_processor''', '''tokenizer''']
_a : List[Any] = '''ViTImageProcessor'''
_a : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
a_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
a_ = kwargs.pop('feature_extractor' )
a_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
a_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None:
a_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
a_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None and images is not None:
a_ = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
a_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
a_ = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _a ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _a ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
return self.image_processor_class
@property
def _a ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , )
return self.image_processor
| 536 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Build a tiny BPE vocabulary and merge list on disk so both the slow and the
        # fast tokenizer can be instantiated from `self.tmpdirname`.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # Only encoder-side keys are produced when no target text is given.
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            # "I am a small frog" is 5 BPE tokens; 1024 repetitions plus <s> and </s>
            # give the expected width of 5122, well below the 16384 model maximum.
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both inputs and labels are wrapped in bos (<s>) and eos (</s>) tokens.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            # LED pads the extra global_attention_mask key with -1 rather than 0.
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so the mean over the length is 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
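

if __name__ == "__main__":
    # Minimal usage sketch, added for illustration (not part of the original test
    # suite): it demonstrates the behaviour `test_global_attention_mask` checks,
    # namely that LED's `tokenizer.pad` pads a user-supplied "global_attention_mask"
    # with -1 rather than 0. Fetching the checkpoint requires network access.
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    encoded = tokenizer(["Summary of the text.", "Another summary."], padding=False)
    encoded["global_attention_mask"] = [[0] * len(ids) for ids in encoded["input_ids"]]
    padded = tokenizer.pad(encoded)
    print(padded["global_attention_mask"])  # [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]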
	 | 701 |
'''simple docstring'''
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the doc's custom.js file for a new release."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
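
# For reference, the block this script expects to find in custom.js, inferred from
# the `startswith` checks above (the concrete version strings are illustrative
# assumptions, not the real file contents):
#
#     const stableVersion = "v4.29.0"
#     const versionMapping = {
#         "main": "main",
#         "v4.29.0": "v4.29.0",
#     }
#
# Running the script with `--version 4.30.0` would rewrite the stableVersion line
# and append a `"v4.30.0": "v4.30.0",` entry just before the closing brace.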
| 30 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Tuple= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase )
self.assertIsInstance(encoding.boxes , lowerCAmelCase )
# Test batched
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: int= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: int= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: str= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Optional[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: List[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Any= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Any:
# with apply_OCR = True
SCREAMING_SNAKE_CASE__: List[Any]= LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Dict= load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__: str= Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__: int= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Union[str, Any]= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__: Dict= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase )
self.assertListEqual(encoding.boxes , lowerCAmelCase )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__: str= LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
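# Minimal usage sketch for the processor exercised above (the class is the
# LayoutLMv3 image processor, spelled LayoutLMvaImageProcessor in this file).
# Assumes Pillow, pytesseract and a hypothetical local file "document.png".
from PIL import Image

processor = LayoutLMvaImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # (1, 3, 224, 224) with the default size
print(encoding.words, encoding.boxes)  # OCR output, present only when apply_ocr=True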
| 64 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
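# Usage sketch for the conversion script above (hypothetical file and paths):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations
#
# The dumped folder can then be reloaded as an image-variation pipeline:
#
#   pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variations")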
| 263 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn dataset bunch into features and target."""
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    """Fit an XGBoost regressor and predict the target for the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(test_features), 1)
    return predictions


def main():
    """Run the California-housing regression end to end."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions)}""")
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions)}""")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
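# Quick smoke-test sketch for the same pipeline on synthetic data, avoiding the
# California-housing download (shapes only, the numbers are random):
import numpy as np
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 8))
y = x @ rng.normal(size=8) + rng.normal(scale=0.1, size=200)
x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.25, random_state=1)
model = XGBRegressor(verbosity=0, random_state=42).fit(x_tr, y_tr)
print(model.predict(x_te).shape)  # (50,)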
| 693 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ['PoolFormerFeatureExtractor']
    _import_structure["image_processing_poolformer"] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
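# The file above follows the transformers lazy-import convention: submodules are
# only imported when one of their attributes is first accessed. A minimal sketch
# of the same idea using only the standard library (not the real _LazyModule):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(submodule, attr)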
| 693 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __lowercase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="""utf-8""", check=True, )
        assert hasattr(self, """env""")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        smp_options = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }

        distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}

        name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="""py36""", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", """w""") as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss}, outfile)
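# The `distribution` dict assembled in create_estimator is the piece that wires
# SageMaker model parallelism into the estimator; its shape, mirroring the test
# values above (not a tuning recommendation):
#
#   distribution = {
#       "smdistributed": {"modelparallel": {"enabled": True, "parameters": {"partitions": 4, "ddp": True, ...}}},
#       "mpi": {"enabled": True, "processes_per_host": 8},
#   }
#
# HuggingFace(..., distribution=distribution) then launches each node with smp enabled.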
| 65 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Materialise the learned masks of a fine-pruned model into its weights."""
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'''bertarized_{os.path.basename(model_name_or_path )}''' )

    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'''\nCreated folder {target_model_path}''' )

    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
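# Conceptual sketch of what a top-K binarizer computes (not the emmental
# implementation): keep the top `threshold` fraction of scores, zero the rest.
import torch

def topk_mask(scores, threshold):
    k = max(1, int(threshold * scores.numel()))
    cutoff = torch.topk(scores.flatten(), k).values.min()
    return (scores >= cutoff).to(scores.dtype)

weights = torch.randn(4, 4)
mask = topk_mask(weights, threshold=0.25)
pruned = weights * mask  # roughly a quarter of the weights survive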
| 120 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element] )
        i = bisect_left(stacks , new_stacks )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stacks )

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(patience_sort(unsorted))
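# Quick checks for the sort above (uses patience_sort from this module):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []
assert patience_sort([-3, -17, -48]) == [-48, -17, -3]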
| 427 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node

    first_solution.append(end_node )

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx_n = solution.index(n )
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn )
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution )
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list ) >= size:
            tabu_list.pop(0 )

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )

    best_sol, best_cost = tabu_search(
        dict_of_neighbours=dict_of_neighbours,
        first_solution=first_solution,
        distance_of_first_solution=distance_of_first_solution,
        iters=args.Iterations,
        size=args.Size, )

    print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
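# The script above expects a whitespace-separated, undirected edge list, one
# edge per line ("node node distance"), e.g. in a hypothetical tabu_data.txt:
#
#   a b 20
#   a c 18
#   a d 22
#   b c 10
#
# and is driven from the command line:
#
#   python tabu_search.py -f tabu_data.txt -i 100 -s 5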
| 427 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build a QFT circuit on `number_of_qubits` qubits and simulate it."""
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""" )

    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )

    quantum_circuit = QuantumCircuit(qr , cr )

    counter = number_of_qubits

    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=10000 )

    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
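# Sanity-check sketch (uses quantum_fourier_transform from this module): on the
# default all-zeros input the QFT yields a uniform superposition, so the counts
# spread over the 2**3 bitstrings and must total the 10000 simulated shots.
demo_counts = quantum_fourier_transform(3)
assert sum(demo_counts.values()) == 10000
assert set(demo_counts).issubset({f"{i:03b}" for i in range(8)})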
| 600 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __UpperCamelCase ( lowercase__ : Any ) -> List[Any]:
'''simple docstring'''
return EnvironmentCommand()
class __a ( __UpperCamelCase ):
@staticmethod
def A ( UpperCAmelCase : ArgumentParser ):
lowerCAmelCase_ : Optional[Any] = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ : int = huggingface_hub.__version__
lowerCAmelCase_ : int = """not installed"""
lowerCAmelCase_ : List[str] = """NA"""
if is_torch_available():
import torch
lowerCAmelCase_ : Any = torch.__version__
lowerCAmelCase_ : Tuple = torch.cuda.is_available()
lowerCAmelCase_ : List[str] = """not installed"""
if is_transformers_available():
import transformers
lowerCAmelCase_ : Optional[int] = transformers.__version__
lowerCAmelCase_ : int = """not installed"""
if is_accelerate_available():
import accelerate
lowerCAmelCase_ : Optional[int] = accelerate.__version__
lowerCAmelCase_ : Tuple = """not installed"""
if is_xformers_available():
import xformers
lowerCAmelCase_ : Tuple = xformers.__version__
lowerCAmelCase_ : Dict = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCAmelCase ) )
return info
@staticmethod
def A ( UpperCAmelCase : Any ):
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 600 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None,
                 backbone="resnet50", batch_size=3, image_size=32, num_channels=3,
                 use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def A__ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ):
__lowerCAmelCase = 'resnet18'
__lowerCAmelCase = 'microsoft/resnet-18'
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A , out_indices=[1, 2, 3] )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def A__ ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def A__ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ):
pass
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(_A )
model.to(_A )
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = model(**_A )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
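# Minimal usage sketch for the backbone under test, using the same "resnet18"
# checkpoint and out_indices the tests rely on (requires timm to be installed):
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one feature map per requested stage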
| 102 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir , F'''{split}_results.json''' )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F'''can't find {path}''' )


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_glue.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_clm_flax.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_summarization_flax.main()
_UpperCAmelCase = get_results(__UpperCamelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_mlm_flax.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_ta_mlm_flax.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_flax_ner.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ):
run_qa.main()
_UpperCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000,
                 feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0,
                 sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
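# Minimal end-to-end sketch for the feature extractor under test: one second of
# silence in, a padded (1, 80, 3000) log-mel spectrogram out.
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # 1 s at the default 16 kHz rate
features = extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): inputs are padded/truncated to 30 s of frames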
| 684 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
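# Worked numbers for the two functions above: sigmoid(0) = 0.5, so the
# GELU-style approximation sigmoid_linear_unit(0) = 0, and for large positive
# inputs it approaches the identity.
import numpy as np

v = np.array([-5.0, 0.0, 5.0])
print(1 / (1 + np.exp(-v)))                 # ~[0.0067, 0.5, 0.9933]
print(v * (1 / (1 + np.exp(-1.702 * v))))   # ~[-0.001, 0.0, 4.999]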
| 711 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def lowerCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Any ='sshleifer/tiny-gpt2'
_lowerCamelCase : str =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowercase_ , multi_process=lowercase_ , )
_lowerCamelCase : Union[str, Any] =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : List[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] ='sgugger/tiny-distilbert-classification'
_lowerCamelCase : int =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , )
_lowerCamelCase : Tuple =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : Tuple =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_lowerCamelCase : int ='sshleifer/tiny-gpt2'
_lowerCamelCase : Dict =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
_lowerCamelCase : Union[str, Any] =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : Optional[int] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : Any ='sshleifer/tiny-gpt2'
_lowerCamelCase : str =AutoConfig.from_pretrained(lowercase_ )
_lowerCamelCase : Union[str, Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=lowercase_ , multi_process=lowercase_ , )
_lowerCamelCase : int =TensorFlowBenchmark(lowercase_ , [config] )
_lowerCamelCase : int =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] ='sshleifer/tiny-gpt2'
_lowerCamelCase : int =AutoConfig.from_pretrained(lowercase_ )
_lowerCamelCase : List[Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
_lowerCamelCase : Optional[int] =TensorFlowBenchmark(lowercase_ , [config] )
_lowerCamelCase : Union[str, Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[int] ='sshleifer/tiny-gpt2'
_lowerCamelCase : Optional[Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
_lowerCamelCase : Optional[Any] =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : List[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] ='sshleifer/tiny-gpt2'
_lowerCamelCase : Tuple =AutoConfig.from_pretrained(lowercase_ )
_lowerCamelCase : Optional[int] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
_lowerCamelCase : Optional[int] =TensorFlowBenchmark(lowercase_ , [config] )
_lowerCamelCase : List[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple ='patrickvonplaten/t5-tiny-random'
_lowerCamelCase : str =AutoConfig.from_pretrained(lowercase_ )
_lowerCamelCase : str =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
_lowerCamelCase : str =TensorFlowBenchmark(lowercase_ , configs=[config] )
_lowerCamelCase : Optional[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ='sshleifer/tiny-gpt2'
_lowerCamelCase : Tuple =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=lowercase_ , multi_process=lowercase_ , )
_lowerCamelCase : Optional[int] =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : Any =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] ='sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Optional[int] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(lowercase_ , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(lowercase_ , 'env.csv' ) , multi_process=lowercase_ , )
_lowerCamelCase : Optional[Any] =TensorFlowBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'env.csv' ) ).exists() )
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
_lowerCamelCase : int ='sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ : Optional[Any] ):
self.assertTrue(hasattr(lowercase_ , 'sequential' ) )
self.assertTrue(hasattr(lowercase_ , 'cumulative' ) )
self.assertTrue(hasattr(lowercase_ , 'current' ) )
self.assertTrue(hasattr(lowercase_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : List[str] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , 'log.txt' ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , eager_mode=lowercase_ , multi_process=lowercase_ , )
_lowerCamelCase : Optional[Any] =TensorFlowBenchmark(lowercase_ )
_lowerCamelCase : Union[str, Any] =benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(lowercase_ , 'log.txt' ) ).exists() )
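
# Added usage sketch: a minimal standalone run of the benchmark pattern the
# tests above exercise. The model name and sizes mirror the tests; the exact
# boolean flags are assumptions rather than a canonical recipe.
#
#     args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"],
#         inference=True,
#         sequence_lengths=[8],
#         batch_sizes=[1],
#         multi_process=False,
#     )
#     results = TensorFlowBenchmark(args).run()
#     print(results.time_inference_result)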
| 464 |
import numpy as np


def exponential_linear_unit(vector: np.ndarray , alpha: float ):
    '''simple docstring'''
    # ELU: pass positive entries through, scale expm1 for the rest.
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
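
# Added worked example: with vector = np.array([-1.0, 0.0, 2.0]) and alpha = 1.0,
# exponential_linear_unit returns approximately [-0.6321, 0.0, 2.0], since
# exp(-1) - 1 ~= -0.6321 while entries greater than zero pass through unchanged.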
| 464 | 1 |
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
"""simple docstring"""
_A : int = "facebook/bart-large-mnli"
_A : Optional[Any] = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
_A : Dict = "text_classifier"
_A : str = AutoTokenizer
_A : List[str] = AutoModelForSequenceClassification
_A : str = ["text", ["text"]]
_A : List[Any] = ["text"]
    def setup(self):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode(self ,text ,labels ):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) ,[f"""This example is {label}""" for label in labels] ,return_tensors="""pt""" ,padding="""max_length""" ,)
    def decode(self ,outputs ):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
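
# Added usage sketch (hypothetical; PipelineTool subclasses are callable, but
# the exact invocation below is an assumption, not taken from this file):
#
#     tool = TextClassificationTool()
#     tool("This new movie is awesome", labels=["positive", "negative"])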
| 719 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=True ,)
        assert hasattr(self ,"""env""" )
    def create_estimator(self ,instance_count=1 ):
        '''simple docstring'''
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=f"""{self.env.base_job_name}-single""" ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version="""py36""" ,)
    def save_results_as_csv(self ,job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def test_glue(self):
        '''simple docstring'''
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,outfile )
| 588 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowerCamelCase( _a ):
lowercase_ : Any = """sew-d"""
def __init__( self, lowerCamelCase=32, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase=2, lowerCamelCase=5_12, lowerCamelCase=2_56, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=("p2c", "c2p"), lowerCamelCase="layer_norm", lowerCamelCase="gelu_python", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=1E-7, lowerCamelCase=1E-5, lowerCamelCase="group", lowerCamelCase="gelu", lowerCamelCase=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12), lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCamelCase=False, lowerCamelCase=1_28, lowerCamelCase=16, lowerCamelCase=True, lowerCamelCase=0.0_5, lowerCamelCase=10, lowerCamelCase=2, lowerCamelCase=0.0, lowerCamelCase=10, lowerCamelCase=0, lowerCamelCase="mean", lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2_56, lowerCamelCase=0, lowerCamelCase=1, lowerCamelCase=2, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase, pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase)
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = feat_extract_norm
_lowercase : str = feat_extract_activation
_lowercase : int = list(lowerCamelCase)
_lowercase : Optional[Any] = list(lowerCamelCase)
_lowercase : Any = list(lowerCamelCase)
_lowercase : List[str] = conv_bias
_lowercase : Tuple = num_conv_pos_embeddings
_lowercase : Dict = num_conv_pos_embedding_groups
_lowercase : Dict = len(self.conv_dim)
_lowercase : int = num_hidden_layers
_lowercase : Optional[Any] = intermediate_size
_lowercase : str = squeeze_factor
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Tuple = position_buckets
_lowercase : Union[str, Any] = share_att_key
_lowercase : str = relative_attention
_lowercase : List[Any] = norm_rel_ebd
_lowercase : int = list(lowerCamelCase)
_lowercase : Tuple = hidden_act
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : int = hidden_dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = activation_dropout
_lowercase : Union[str, Any] = feat_proj_dropout
_lowercase : Optional[int] = final_dropout
_lowercase : str = layer_norm_eps
_lowercase : Any = feature_layer_norm_eps
_lowercase : Optional[Any] = initializer_range
_lowercase : Union[str, Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase : Any = apply_spec_augment
_lowercase : Dict = mask_time_prob
_lowercase : Optional[int] = mask_time_length
_lowercase : str = mask_time_min_masks
_lowercase : List[str] = mask_feature_prob
_lowercase : List[Any] = mask_feature_length
_lowercase : Tuple = mask_feature_min_masks
# ctc loss
_lowercase : List[str] = ctc_loss_reduction
_lowercase : Any = ctc_zero_infinity
# sequence classification
_lowercase : Any = use_weighted_layer_sum
_lowercase : int = classifier_proj_size
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return functools.reduce(operator.mul, self.conv_stride, 1)
| 89 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
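
# Added worked example: F(12) = 144 is the first Fibonacci term with 3 digits,
# so fibonacci_digits_index(3) == solution(3) == 12; solution(1000) answers
# Project Euler problem 25.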
| 89 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any=1_3 , __lowerCamelCase : str=7 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=9_9 , __lowerCamelCase : Any=3_2 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : List[Any]=3_7 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=5_1_2 , __lowerCamelCase : List[Any]=1_6 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[str]=4 , ) -> List[Any]:
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_attention_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_choices
def _snake_case ( self : Tuple ) -> str:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_attention_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self : int ) -> Optional[int]:
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class A_ ( _A , unittest.TestCase ):
UpperCAmelCase__ = True
UpperCAmelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ = FlaxRoFormerModelTester(self )
@slow
def _snake_case ( self : str ) -> Optional[int]:
for model_class_name in self.all_model_classes:
__magic_name__ = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=UpperCamelCase__ )
__magic_name__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def _snake_case ( self : Any ) -> Any:
__magic_name__ = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
__magic_name__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
__magic_name__ = model(UpperCamelCase__ )[0]
__magic_name__ = 5_0_0_0_0
__magic_name__ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase__ )
__magic_name__ = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 710 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float , y: float , max_step: int ) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb(distance: float ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image(
    image_width: int = 8_0_0 ,
    image_height: int = 6_0_0 ,
    figure_center_x: float = -0.6 ,
    figure_center_y: float = 0 ,
    figure_width: float = 3.2 ,
    max_step: int = 5_0 ,
    use_distance_color_coding: bool = True ,
):
    '''simple docstring'''
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowercase = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
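
# Added note: get_distance() returns a value in [0, 1]; points that never
# diverge within max_step iterations map to 1 and are drawn black by both
# coloring functions. Example variants, mirroring the commented calls above:
#
#     img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)
#     img = get_image(use_distance_color_coding=False)
#     img.save("mandelbrot.png")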
| 468 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ : Optional[Any] = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
UpperCamelCase__ : Optional[int] = {'''mobilebert-uncased''': 5_12}
UpperCamelCase__ : Optional[int] = {}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = VOCAB_FILES_NAMES
__a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__a : List[str] = PRETRAINED_INIT_CONFIGURATION
__a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[str] = MobileBertTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__=True ,snake_case__="[UNK]" ,snake_case__="[SEP]" ,snake_case__="[PAD]" ,snake_case__="[CLS]" ,snake_case__="[MASK]" ,snake_case__=True ,snake_case__=None ,**snake_case__ ,):
super().__init__(
snake_case__ ,tokenizer_file=snake_case__ ,do_lower_case=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,pad_token=snake_case__ ,cls_token=snake_case__ ,mask_token=snake_case__ ,tokenize_chinese_chars=snake_case__ ,strip_accents=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' ,snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,snake_case__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(snake_case__ ,normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : int = do_lower_case
SCREAMING_SNAKE_CASE_ : Any = strip_accents
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ : Union[str, Any] = normalizer_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = do_lower_case
def snake_case ( self ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self._tokenizer.model.save(snake_case__ ,name=snake_case__ )
return tuple(snake_case__ )
| 105 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self , data: bytes ) -> None:
        '''simple docstring'''
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing(data: bytes ) -> bytes:
        '''simple docstring'''
        padding = B"""\x80""" + (B"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X100_000_000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X100_000_000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X100_000_000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X100_000_000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror(self , value: int , rotations: int ) -> int:
        '''simple docstring'''
        return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTestCase(unittest.TestCase ):
    def test_match_hashes(self) -> None:
        '''simple docstring'''
        import hashlib
        msg = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main():
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
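
# Added usage sketch: the digest is computed eagerly in __init__, so hashing a
# byte string is a single expression (the unit test above cross-checks the
# result against hashlib):
#
#     >>> import hashlib
#     >>> SHAaaa(b"Test String").hash == hashlib.sha256(b"Test String").hexdigest()
#     True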
| 409 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
_lowerCamelCase = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_lowerCamelCase = 0
_lowerCamelCase = 0Xe000
_lowerCamelCase = 0Xe001
_lowerCamelCase = 0Xe002
_lowerCamelCase = 0Xe003
_lowerCamelCase = 0Xe004
# Maps special codepoints to human-readable names.
_lowerCamelCase = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=chr(UpperCAmelCase__ ) , UpperCAmelCase__=False , UpperCAmelCase__=2048 , **UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , model_max_length=UpperCAmelCase__ , **UpperCAmelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
SCREAMING_SNAKE_CASE__ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
SCREAMING_SNAKE_CASE__ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
SCREAMING_SNAKE_CASE__ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE
SCREAMING_SNAKE_CASE__ = len(self._special_codepoints )
@property
def lowerCAmelCase__ ( self ):
return self._unicode_vocab_size
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
return list(UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
try:
return ord(UpperCAmelCase__ )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCAmelCase__ )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
return "".join(UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(UpperCAmelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCAmelCase__ )) + [1]
return result
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
return ()
| 112 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int ):
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution(limit: int = 10000 ):
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
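
# Added worked example: 220 and 284 form the classic amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both numbers
# are counted by solution(10000) (Project Euler problem 21).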
| 112 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A : Dict = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
A : Any = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 140 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def UpperCamelCase ( __magic_name__ : Any ) -> int:
"""simple docstring"""
lowercase__ = model.config
lowercase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ = MBartConfig(
is_decoder=__magic_name__ , is_encoder_decoder=__magic_name__ , add_cross_attention=__magic_name__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__magic_name__ , add_final_layer_norm=__magic_name__ , )
return encoder_config, decoder_config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
lowercase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase__ = """encoder.""" + name
if "attn.proj" in name:
lowercase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ = """encoder.layernorm.bias"""
return name
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
lowercase__ = key.split(""".""" )
lowercase__ = int(key_split[3] )
lowercase__ = int(key_split[5] )
lowercase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ = val
return orig_state_dict
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[Any]=None , __magic_name__ : Dict=False ) -> int:
"""simple docstring"""
lowercase__ = DonutModel.from_pretrained(__magic_name__ ).eval()
# load HuggingFace model
lowercase__ , lowercase__ = get_configs(__magic_name__ )
lowercase__ = DonutSwinModel(__magic_name__ )
lowercase__ = MBartForCausalLM(__magic_name__ )
lowercase__ = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
lowercase__ = original_model.state_dict()
lowercase__ = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify results on scanned document
lowercase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowercase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase__ = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ , from_slow=__magic_name__ )
lowercase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ = DonutProcessor(__magic_name__ , __magic_name__ )
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ = """When is the coffee break?"""
lowercase__ = task_prompt.replace("""{user_input}""" , __magic_name__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase__ = original_model.decoder.tokenizer(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase__ = original_model.encoder.model.patch_embed(__magic_name__ )
lowercase__ , lowercase__ = model.encoder.embeddings(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
# verify encoder hidden states
lowercase__ = original_model.encoder(__magic_name__ )
lowercase__ = model.encoder(__magic_name__ ).last_hidden_state
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 )
# verify decoder hidden states
lowercase__ = original_model(__magic_name__ , __magic_name__ , __magic_name__ ).logits
lowercase__ = model(__magic_name__ , decoder_input_ids=__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
A : Optional[int] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "FlavaImageProcessor"
snake_case_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Optional[Any] , __snake_case : str=None , __snake_case : Any=None , **__snake_case : Union[str, Any] )-> str:
snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __snake_case , )
snake_case = kwargs.pop("""feature_extractor""" )
snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__snake_case , __snake_case )
snake_case = self.image_processor
def __call__( self : int , __snake_case : Optional[ImageInput] = None , __snake_case : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = False , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Dict , )-> Union[str, Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
if images is not None:
snake_case = self.image_processor(
__snake_case , return_image_mask=__snake_case , return_codebook_pixels=__snake_case , return_tensors=__snake_case , **__snake_case , )
if text is not None and images is not None:
encoding.update(__snake_case )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Dict , **__snake_case : Union[str, Any] )-> int:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , *__snake_case : int , **__snake_case : Tuple )-> Optional[int]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowerCAmelCase ( self : List[str] )-> Tuple:
snake_case = self.tokenizer.model_input_names
snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : int )-> Any:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = None
class _lowerCAmelCase ( A__ , A__ ):
"""simple docstring"""
snake_case_ = 2
@register_to_config
def __init__( self : Any , __snake_case : float = 0.02 , __snake_case : float = 1_00 , __snake_case : float = 1.0_07 , __snake_case : float = 80 , __snake_case : float = 0.05 , __snake_case : float = 50 , )-> List[str]:
# standard deviation of the initial noise distribution
snake_case = sigma_max
# setable values
snake_case = None
snake_case = None
snake_case = None # sigma(t_i)
def lowerCAmelCase ( self : Optional[int] , __snake_case : torch.FloatTensor , __snake_case : Optional[int] = None )-> torch.FloatTensor:
return sample
def lowerCAmelCase ( self : Optional[int] , __snake_case : int , __snake_case : Union[str, torch.device] = None )-> Dict:
snake_case = num_inference_steps
snake_case = np.arange(0 , self.num_inference_steps )[::-1].copy()
snake_case = torch.from_numpy(__snake_case ).to(__snake_case )
snake_case = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
snake_case = torch.tensor(__snake_case , dtype=torch.floataa , device=__snake_case )
def lowerCAmelCase ( self : Optional[int] , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : Optional[torch.Generator] = None )-> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
snake_case = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
snake_case = 0
# sample eps ~ N(0, S_noise^2 * I)
snake_case = self.config.s_noise * randn_tensor(sample.shape , generator=__snake_case ).to(sample.device )
snake_case = sigma + gamma * sigma
snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCAmelCase ( self : Optional[Any] , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : float , __snake_case : torch.FloatTensor , __snake_case : bool = True , )-> Union[KarrasVeOutput, Tuple]:
snake_case = sample_hat + sigma_hat * model_output
snake_case = (sample_hat - pred_original_sample) / sigma_hat
snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__snake_case , derivative=__snake_case , pred_original_sample=__snake_case )
def lowerCAmelCase ( self : List[str] , __snake_case : torch.FloatTensor , __snake_case : float , __snake_case : float , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : bool = True , )-> Union[KarrasVeOutput, Tuple]:
snake_case = sample_prev + sigma_prev * model_output
snake_case = (sample_prev - pred_original_sample) / sigma_prev
snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__snake_case , derivative=__snake_case , pred_original_sample=__snake_case )
def lowerCAmelCase ( self : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] )-> Any:
raise NotImplementedError()
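# --- Hedged illustration (not part of the scheduler API above) --------------
# A standalone restatement of the stochastic "churn" step that
# `add_noise_to_input` implements, following Karras et al. (2022); the
# function name and default values below are illustrative assumptions only.
def _karras_churn_sketch(sample, sigma, s_churn=80.0, s_noise=1.007, num_steps=50):
    # gamma > 0 temporarily raises the noise level before the ODE step
    gamma = min(s_churn / num_steps, 2**0.5 - 1)
    eps = s_noise * torch.randn_like(sample)  # eps ~ N(0, s_noise^2 * I)
    sigma_hat = sigma + gamma * sigma
    # move the sample from noise level sigma up to sigma_hat
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
    return sample_hat, sigma_hat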
| 517 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = StableDiffusionPanoramaPipeline
snake_case_ = TEXT_TO_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
UpperCAmelCase__ : int = DDIMScheduler()
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
UpperCAmelCase__ : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowercase ( self : Union[str, Any] ,A : Dict ,A : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = torch.manual_seed(A )
UpperCAmelCase__ : Union[str, Any] = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : str = self.get_dummy_components()
UpperCAmelCase__ : Any = StableDiffusionPanoramaPipeline(**A )
UpperCAmelCase__ : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase__ : Dict = sd_pipe(**A ).images
UpperCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25e-3 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : str = self.get_dummy_components()
UpperCAmelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline(**A )
UpperCAmelCase__ : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(A )
UpperCAmelCase__ : Optional[int] = """french fries"""
UpperCAmelCase__ : List[str] = sd_pipe(**A ,negative_prompt=A )
UpperCAmelCase__ : Optional[Any] = output.images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : str = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : str = self.get_dummy_components()
UpperCAmelCase__ : Dict = StableDiffusionPanoramaPipeline(**A )
UpperCAmelCase__ : List[str] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : Any = self.get_dummy_inputs(A )
UpperCAmelCase__ : List[str] = sd_pipe(**A ,view_batch_size=2 )
UpperCAmelCase__ : Dict = output.images
UpperCAmelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" )
UpperCAmelCase__ : Optional[Any] = StableDiffusionPanoramaPipeline(**A )
UpperCAmelCase__ : List[str] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(A )
UpperCAmelCase__ : Tuple = sd_pipe(**A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Any = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Union[str, Any] = PNDMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,skip_prk_steps=A )
UpperCAmelCase__ : List[str] = StableDiffusionPanoramaPipeline(**A )
UpperCAmelCase__ : int = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs(A )
UpperCAmelCase__ : int = sd_pipe(**A ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : str = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ,A : Dict=0 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.manual_seed(A )
UpperCAmelCase__ : Tuple = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase__ : Union[str, Any] = DDIMScheduler.from_pretrained(A ,subfolder="""scheduler""" )
UpperCAmelCase__ : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(A ,scheduler=A ,safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Union[str, Any] = self.get_inputs()
UpperCAmelCase__ : Tuple = pipe(**A ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase__ : List[Any] = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" ,safety_checker=A )
UpperCAmelCase__ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : List[str] = self.get_inputs()
UpperCAmelCase__ : Union[str, Any] = pipe(**A ).images
UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase__ : Any = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = 0
def callback_fn(A : int ,A : int ,A : torch.FloatTensor ) -> None:
UpperCAmelCase__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase__ : Union[str, Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase__ : Any = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCAmelCase__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase__ : Union[str, Any] = latents[0, -3:, -3:, -1]
UpperCAmelCase__ : int = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Union[str, Any] = """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase__ : Optional[int] = DDIMScheduler.from_pretrained(A ,subfolder="""scheduler""" )
UpperCAmelCase__ : int = StableDiffusionPanoramaPipeline.from_pretrained(A ,scheduler=A ,safety_checker=A )
UpperCAmelCase__ : List[Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : List[str] = self.get_inputs()
pipe(**A ,callback=A ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase ( self : List[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : int = """stabilityai/stable-diffusion-2-base"""
UpperCAmelCase__ : List[str] = DDIMScheduler.from_pretrained(A ,subfolder="""scheduler""" )
UpperCAmelCase__ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(A ,scheduler=A ,safety_checker=A )
UpperCAmelCase__ : List[str] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : str = self.get_inputs()
UpperCAmelCase__ : Union[str, Any] = pipe(**A )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
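# --- Hedged illustration (mirrors the slow tests above; never executed) -----
def _panorama_usage_sketch():
    # Requires a CUDA device and downloaded weights; the checkpoint name and
    # call pattern are taken from the tests above, so treat this as a sketch,
    # not a reference implementation.
    scheduler = DDIMScheduler.from_pretrained(
        "stabilityai/stable-diffusion-2-base", subfolder="scheduler"
    )
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-base", scheduler=scheduler, safety_checker=None
    ).to("cuda")
    return pipe("a photo of the dolomites", num_inference_steps=50).images[0]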
| 65 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 249 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a :int = 16
a :List[str] = 32
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : int = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Dict = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : int = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Optional[int] = 8
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
return tokenizer.pad(
__lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a :Union[str, Any] = mocked_dataloaders # noqa: F811
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1":
SCREAMING_SNAKE_CASE__ : str = 2
# New Code #
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : int = config["""lr"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : int = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Optional[int] = evaluate.load("""glue""" , """mrpc""" )
set_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # We also do not currently support TPUs, nor do we advise using them, as bugs
            # were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = output.loss
accelerator.backward(__lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
def _lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__lowerCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[str] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
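# --- Hedged illustration -----------------------------------------------------
# The core accumulation pattern used in `training_function` above, reduced to
# comments (the object names stand in for the prepared training objects):
#
#   accelerator = Accelerator(gradient_accumulation_steps=4)
#   for batch in train_dataloader:
#       with accelerator.accumulate(model):   # gradients sync only at the boundary step
#           loss = model(**batch).loss
#           accelerator.backward(loss)
#           optimizer.step()
#           lr_scheduler.step()
#           optimizer.zero_grad()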
| 12 |
"""simple docstring"""
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
SCREAMING_SNAKE_CASE__ : List[Any] = weight
def __repr__( self ) -> List[Any]:
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def _a ( self ) -> Dict:
"""simple docstring"""
return self.value
def _a ( self ) -> int:
"""simple docstring"""
return self.name
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return self.weight
def _a ( self ) -> Dict:
"""simple docstring"""
return self.value / self.weight
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any = []
for i in range(len(__lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted(__lowerCAmelCase , key=__lowerCAmelCase , reverse=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0, 0.0
for i in range(len(__lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _lowercase ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
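# --- Hedged illustration -----------------------------------------------------
# The two module functions above were presumably `build_menu` and `greedy`
# before obfuscation collapsed both names; assuming that original interface:
#
#   menu = build_menu(["burger", "pizza", "salad"], [80, 100, 30], [40, 60, 10])
#   taken, value = greedy(menu, 100, Things.get_value)
#   # sorts by the key function (here: value) and packs items greedily while
#   # total weight stays within the 100-unit budget -> value == 180.0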
| 12 | 1 |
'''simple docstring'''
__lowercase : Dict = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def lowercase_ ( ) -> None:
'''simple docstring'''
lowerCamelCase_ : int = input('''Enter message: ''' )
lowerCamelCase_ : Optional[int] = input('''Enter key [alphanumeric]: ''' )
lowerCamelCase_ : Dict = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCamelCase_ : str = '''encrypt'''
lowerCamelCase_ : List[str] = encrypt_message(_UpperCAmelCase , _UpperCAmelCase )
elif mode.lower().startswith('''d''' ):
lowerCamelCase_ : Optional[int] = '''decrypt'''
lowerCamelCase_ : Optional[int] = decrypt_message(_UpperCAmelCase , _UpperCAmelCase )
print(F"""\n{mode.title()}ed message:""" )
print(_UpperCAmelCase )
def lowercase_ ( _lowercase , _lowercase ) -> str:
'''simple docstring'''
return translate_message(_UpperCAmelCase , _UpperCAmelCase , '''encrypt''' )
def lowercase_ ( _lowercase , _lowercase ) -> str:
'''simple docstring'''
return translate_message(_UpperCAmelCase , _UpperCAmelCase , '''decrypt''' )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
lowerCamelCase_ : List[str] = []
lowerCamelCase_ : Optional[int] = 0
lowerCamelCase_ : Tuple = key.upper()
for symbol in message:
lowerCamelCase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_UpperCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_UpperCAmelCase ):
lowerCamelCase_ : str = 0
else:
translated.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
main()
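# --- Hedged illustration -----------------------------------------------------
# A round trip using the names that `main()` above calls (the defs themselves
# were renamed by obfuscation, so this is the assumed original interface and
# argument order):
#
#   ciphertext = encrypt_message("LION", "attack at dawn")
#   assert decrypt_message("LION", ciphertext) == "attack at dawn"
#
# Non-letter symbols pass through unchanged and letter case is preserved.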
| 422 |
from heapq import heappop, heappush
import numpy as np
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
__lowercase , __lowercase = grid.shape
__lowercase = [-1, 1, 0, 0]
__lowercase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__lowercase , __lowercase = [(0, source)], set()
__lowercase = np.full((rows, cols) , np.inf )
__lowercase = 0
__lowercase = np.empty((rows, cols) , dtype=_UpperCAmelCase )
__lowercase = None
while queue:
((__lowercase) , (__lowercase)) = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__lowercase = []
while (x, y) != source:
path.append((x, y) )
__lowercase , __lowercase = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
__lowercase , __lowercase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__lowercase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
__lowercase = dist + 1
__lowercase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
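# --- Hedged illustration -----------------------------------------------------
# Intended usage of the grid shortest-path search above (its def name was lost
# to the obfuscation; assume it was something like `dijkstra`):
#
#   grid = np.array([[1, 1, 1],
#                    [0, 1, 0],
#                    [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # expected: dist == 4.0 and path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]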
| 321 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__snake_case :Optional[Any] ={
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] =[
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    __snake_case :int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
A = []
for part_id in partition_order:
A = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(lowerCAmelCase__ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(100 ).repartition(1 )
A = Spark(lowerCAmelCase__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Dict:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(10 ).repartition(2 )
A = [1, 0]
A = _generate_iterable_examples(lowerCAmelCase__ , lowerCAmelCase__ ) # Reverse the partitions.
A = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase__ , lowerCAmelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A , A = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(10 ).repartition(1 )
A = SparkExamplesIterable(lowerCAmelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCAmelCase__ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
A = lambda lowerCAmelCase__ : x.reverse()
A = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase__ , [2, 1, 0] )
A = SparkExamplesIterable(lowerCAmelCase__ ).shuffle_data_sources(lowerCAmelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCAmelCase__ ):
A , A = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A = SparkExamplesIterable(lowerCAmelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
A = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowerCAmelCase__ ):
A , A = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A = SparkExamplesIterable(lowerCAmelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
A = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCAmelCase__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowerCAmelCase__ ):
A , A = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase_ ( ) -> int:
'''simple docstring'''
A = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A = spark.range(100 ).repartition(1 )
A = Spark(lowerCAmelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 224 | 0 |
'''simple docstring'''
from __future__ import annotations
lowerCamelCase_ = []
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> bool:
'''simple docstring'''
for i in range(len(__lowercase ) ):
if board[row][i] == 1:
return False
for i in range(len(__lowercase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__lowercase , -1 , -1 ) , range(__lowercase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__lowercase , -1 , -1 ) , range(__lowercase , len(__lowercase ) ) ):
if board[i][j] == 1:
return False
return True
def __lowercase ( __lowercase , __lowercase ) -> bool:
'''simple docstring'''
if row >= len(__lowercase ):
solution.append(__lowercase )
printboard(__lowercase )
print()
return True
for i in range(len(__lowercase ) ):
if is_safe(__lowercase , __lowercase , __lowercase ):
_A = 1
solve(__lowercase , row + 1 )
_A = 0
return False
def __lowercase ( __lowercase ) -> None:
'''simple docstring'''
for i in range(len(__lowercase ) ):
for j in range(len(__lowercase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowerCamelCase_ = 8
lowerCamelCase_ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
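# --- Hedged illustration -----------------------------------------------------
# For n = 4 the backtracking above finds the two distinct placements; note
# that `solution` stores references to the one mutable `board`, so after the
# search only `len(solution)` is meaningful, not the saved board contents.
#
#   board_4 = [[0] * 4 for _ in range(4)]
#   solve(board_4, 0)   # prints both 4-queens solutions as they are found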
| 330 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ = logging.WARNING
def UpperCAmelCase ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = os.getenv('''DATASETS_VERBOSITY''' ,UpperCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def UpperCAmelCase ( )-> str:
'''simple docstring'''
return __name__.split('''.''' )[0]
def UpperCAmelCase ( )-> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase ( UpperCAmelCase = None )-> logging.Logger:
'''simple docstring'''
if name is None:
SCREAMING_SNAKE_CASE_ = _get_library_name()
return logging.getLogger(UpperCAmelCase )
def UpperCAmelCase ( )-> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase ( UpperCAmelCase )-> None:
'''simple docstring'''
_get_library_root_logger().setLevel(UpperCAmelCase )
def UpperCAmelCase ( )-> Optional[Any]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Union[str, Any]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Dict:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Optional[int]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class snake_case :
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str ) -> Optional[int]: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = args[0] if args else None
def __iter__( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> int:
"""simple docstring"""
def empty_fn(*lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ) -> List[str]:
"""simple docstring"""
return self
def __exit__( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> Dict:
"""simple docstring"""
return
A_ = True
class snake_case :
'''simple docstring'''
def __call__( self : Union[str, Any] , *lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
else:
return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowercase ( self : int , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowercase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A_ = _tqdm_cls()
def UpperCAmelCase ( )-> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase ( )-> List[str]:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE_ = True
def UpperCAmelCase ( )-> Optional[int]:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE_ = False
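# --- Hedged illustration -----------------------------------------------------
# Intended use of the helpers above (their names collapsed under obfuscation;
# the originals were presumably `get_logger`, the `set_verbosity_*` family,
# and the progress-bar toggles):
#
#   logger = get_logger(__name__)        # child of the library root logger
#   set_verbosity(INFO)                  # or a set_verbosity_info() shortcut
#   logger.info("datasets logging configured")
#   disable_progress_bar()               # flips the module-level _tqdm_active flag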
| 393 | 0 |
"""simple docstring"""
class lowerCAmelCase_ : # Public class to implement a graph
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
__lowerCAmelCase = row
__lowerCAmelCase = col
__lowerCAmelCase = graph
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# Checking all 8 elements surrounding nth element
__lowerCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCAmelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , A_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , A_ )
def A__ ( self ) -> int: # And finally, count all islands.
__lowerCAmelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCAmelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(A_ , A_ , A_ )
count += 1
return count
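# --- Hedged illustration -----------------------------------------------------
# Intended usage, assuming the constructor stores its arguments as the
# self.ROW / self.COL / self.graph attributes the methods above read, and that
# the three methods were originally `is_safe`, `diffs` (the DFS) and
# `count_islands`:
#
#   grid = [[1, 1, 0, 0],
#           [0, 1, 0, 0],
#           [0, 0, 0, 1]]
#   g = Graph(3, 4, grid)      # "Graph" is the assumed original class name
#   g.count_islands()          # -> 2 (8-connected components of 1-cells)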
| 705 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , *snake_case_ , **snake_case_ ) -> None:
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 573 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 659 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = tokenizer('''This is me''' , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
SCREAMING_SNAKE_CASE = model.generate(**lowercase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
SCREAMING_SNAKE_CASE = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def snake_case__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''hf-internal-testing/tiny-random-t5'''
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
        model.save_pretrained(lowercase )
| 201 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( _a ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_A , unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self , snake_case_ = 1 , snake_case_ = None , snake_case_ = 0.0 , snake_case_ = 50 , snake_case_ = "pil" , snake_case_ = True , **snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_A , )
__UpperCAmelCase: Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase: Tuple = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
__UpperCAmelCase: List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCAmelCase: int = {}
if accepts_eta:
__UpperCAmelCase: Optional[int] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
__UpperCAmelCase: Dict = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
__UpperCAmelCase: Dict = self.unet(_A , _A ).sample
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase: Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# decode the image latents with the VAE
__UpperCAmelCase: List[str] = self.vqvae.decode(_A ).sample
__UpperCAmelCase: Dict = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase: Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase: Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=_A )
| 714 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = JukeboxTokenizer
__lowerCAmelCase = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
import torch
__UpperCAmelCase: Dict = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
__UpperCAmelCase: List[str] = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
__UpperCAmelCase: int = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowercase_ ( self ):
'''simple docstring'''
import torch
__UpperCAmelCase: Any = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
__UpperCAmelCase: Tuple = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
__UpperCAmelCase: int = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 466 | 0 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
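# Worked check of the closed form n * (2n - 1) computed above:
#   n = 0, 1, 2, 3, 4  ->  0, 1, 6, 15, 28
# so hexagonal_numbers(length=5) should print [0, 1, 6, 15, 28] (the name as
# used in the __main__ guard; the def itself was renamed by the obfuscation).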
| 580 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_snake_case = '''src/diffusers'''
_snake_case = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_snake_case = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_snake_case = spec.loader.load_module()
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[Any] , SCREAMING_SNAKE_CASE: Optional[Any] ):
"""simple docstring"""
return line.startswith(SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , SCREAMING_SNAKE_CASE ) is not None
def __snake_case ( SCREAMING_SNAKE_CASE: List[str] ):
"""simple docstring"""
_lowerCAmelCase = object_name.split('.' )
_lowerCAmelCase = 0
# First let's find the module where our object lives.
_lowerCAmelCase = parts[i]
while i < len(SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , f"""{module}.py""" ) ):
i += 1
if i < len(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE , parts[i] )
if i >= len(SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(SCREAMING_SNAKE_CASE , f"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_lowerCAmelCase = ''
_lowerCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(SCREAMING_SNAKE_CASE ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCAmelCase = line_index
while line_index < len(SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase = lines[start_index:line_index]
return "".join(SCREAMING_SNAKE_CASE )
_snake_case = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_snake_case = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_snake_case = re.compile(R'''<FILL\s+[^>]*>''')
def __snake_case ( SCREAMING_SNAKE_CASE: List[str] ):
"""simple docstring"""
_lowerCAmelCase = code.split('\n' )
_lowerCAmelCase = 0
while idx < len(SCREAMING_SNAKE_CASE ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(SCREAMING_SNAKE_CASE ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase = len(get_indent(SCREAMING_SNAKE_CASE ) ) > 0
if has_indent:
_lowerCAmelCase = f"""class Bla:\n{code}"""
_lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=SCREAMING_SNAKE_CASE )
_lowerCAmelCase = black.format_str(SCREAMING_SNAKE_CASE , mode=SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase = style_docstrings_in_code(SCREAMING_SNAKE_CASE )
return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the list of differences, or rewrite the content when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
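# The returned `diffs` is a list of [object_name, start_index] pairs, e.g.
# [["models.attention.BasicTransformerBlock", 120]] (object name illustrative),
# one entry per copied block whose body has drifted from its source.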
def check_copies(overwrite: bool = False):
    """Check all `# Copied from` blocks under DIFFUSERS_PATH; raise (or rewrite) on drift."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
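# Typical workflow (a sketch; the copy header below is illustrative): a file marks a block with
#   # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Flax
# then `python utils/check_copies.py` flags any drift from the source, and the
# --fix_and_overwrite flag rewrites the stale copy in place.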
| 580 | 1 |
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal):
    """Find a shortest path from `start` to `goal` with breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target):
    """Return the number of edges on a shortest path from `start` to `target`, or -1 if unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
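# `queue.pop(0)` above is O(n) per pop; a deque-based variant (a sketch, not in
# the original file) returns the same shortest paths with O(1) pops:
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal):
    """Same breadth-first traversal as bfs_shortest_path, backed by a deque."""
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbour in graph[node]:
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []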
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """Configuration class for MarkupLM models (defaults mirror microsoft/markuplm-base)."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
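# Minimal usage sketch (not part of the original file; values follow from the
# defaults above):
#   config = MarkupLMConfig()
#   config.max_depth                  # -> 50
#   config.xpath_unit_hidden_size     # -> 32
#   config.save_pretrained("./out")   # standard PretrainedConfig API, writes ./out/config.json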
| 106 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Tuple ,A_ : List[Any]=7 ,A_ : Dict=3 ,A_ : Any=18 ,A_ : List[Any]=30 ,A_ : List[Any]=400 ,A_ : List[Any]=True ,A_ : Optional[int]=None ,A_ : int=True ,A_ : Any=None ,A_ : Optional[Any]=True ,A_ : Union[str, Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,A_ : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,A_ : Union[str, Any]=True ,) -> int:
A = size if size is not None else {'height': 224, 'width': 224}
A = crop_size if crop_size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_center_crop
A = crop_size
A = do_normalize
A = image_mean
A = image_std
A = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=False ,A_ : List[str]=False ,A_ : Optional[Any]=False ) -> List[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
A = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
else:
A = []
for i in range(self.batch_size ):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
A = [Image.fromarray(np.moveaxis(A_ ,0 ,-1 ) ) for x in image_inputs]
if torchify:
A = [torch.from_numpy(A_ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_center_crop' ) )
self.assertTrue(hasattr(A_ ,'center_crop' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
self.assertTrue(hasattr(A_ ,'image_mean' ) )
self.assertTrue(hasattr(A_ ,'image_std' ) )
self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
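# Direct-usage sketch of the processor under test (a sketch; `pil_image` is a
# stand-in for any PIL image, and the crop_size mirrors the tester above):
#   processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch.pixel_values.shape  # -> torch.Size([1, 3, 18, 18])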
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_center_crop' ) )
self.assertTrue(hasattr(A_ ,'center_crop' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
self.assertTrue(hasattr(A_ ,'image_mean' ) )
self.assertTrue(hasattr(A_ ,'image_std' ) )
self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
            ) ,)
| 91 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Dict = BertJapaneseTokenizer
__lowercase : List[str] = False
__lowercase : List[Any] = True
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_input_output_texts(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__)
return text, ids
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
self.assertIsNotNone(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase__ , """wb""") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , """rb""") as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def snake_case_ ( self):
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic_lite""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def snake_case_ ( self):
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic""")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def snake_case_ ( self):
try:
__SCREAMING_SNAKE_CASE = MecabTokenizer(
do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="""ipadic""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
self.assertIsNotNone(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase__ , """wb""") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , """rb""") as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="""core""")
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
self.assertIsNotNone(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
with open(lowerCAmelCase__ , """wb""") as handle:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(lowerCAmelCase__ , """rb""") as handle:
__SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
__SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
__SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
self.assertListEqual(lowerCAmelCase__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
__SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
self.assertListEqual(lowerCAmelCase__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : str = BertJapaneseTokenizer
__lowercase : int = False
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def snake_case_ ( self , **lowerCAmelCase__):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
__SCREAMING_SNAKE_CASE = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
pass # TODO add if relevant
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
self.assertListEqual(
lowerCAmelCase__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
__SCREAMING_SNAKE_CASE = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""") as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from."""))
| 155 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
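# How test_patching.py consumes this module (a sketch; it assumes
# datasets.utils.patching.patch_submodule, and `_mock_join` is hypothetical):
#   import _test_patching
#   from datasets.utils.patching import patch_submodule
#   with patch_submodule(_test_patching, "os.path.join", _mock_join):
#       _test_patching.path.join("a", "b")  # now dispatched to _mock_join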
| 15 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
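# Minimal usage sketch (the vocab path is hypothetical; assumes each character
# of the input is present in the vocab):
#   tok = MgpstrTokenizer("path/to/vocab.json")
#   tok.tokenize("abc")           # character-level: ["a", "b", "c"]
#   tok.save_vocabulary("out")    # -> ("out/vocab.json",)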
| 15 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """2-color the graph with DFS, then verify no edge joins two same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
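# Any graph containing an odd cycle is not 2-colorable (extra check, not in the original):
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False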
| 431 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=10, UpperCamelCase__=3, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=32, UpperCamelCase__=5, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=10, UpperCamelCase__=0.02, UpperCamelCase__=0.9, UpperCamelCase__=None, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = tubelet_size
lowerCAmelCase_ = num_frames
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = mask_ratio
lowerCAmelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCAmelCase_ = (image_size // patch_size) ** 2
lowerCAmelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCAmelCase_ = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.batch_size, -1 ).bool()
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )
# model only returns predictions for masked patches
lowerCAmelCase_ = mask.sum().item()
lowerCAmelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__snake_case = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.model_tester.batch_size, -1 ).bool()
lowerCAmelCase_ = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__, nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCAmelCase_ = len(UpperCamelCase__ )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
self.assertEqual(out_len + 1, len(UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def __UpperCamelCase ( ):
lowerCAmelCase_ = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCAmelCase_ = np.load(_A )
return list(_A )
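# `prepare_video` returns the clip as a list of numpy frames loaded from the
# hf-internal-testing/spaghetti-video dataset repo (16 frames with per-frame
# shape (360, 640, 3) is an assumption about the stored array).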
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
lowerCAmelCase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
lowerCAmelCase_ = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]], device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCAmelCase_ = torch.tensor([0.5_142], device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
        lowerCAmelCase_ = torch.tensor([0.6_469], device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
| 431 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 byte values to printable unicode strings, so byte-level
    BPE can operate on every byte without unprintable control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
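# Quick sanity sketch for the two helpers above (not in the original file):
#   bytes_to_unicode()[ord("A")]          # -> "A"  (printable bytes map to themselves)
#   sorted(get_pairs(("l", "o", "w")))    # -> [("l", "o"), ("o", "w")]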
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(snake_case_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='utf-8' ) as merges_handle:
UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size(self) -> int:
        return len(self.encoder )

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe(self, token):
        """Apply byte-pair encoding to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
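    # Worked example (editor addition, with a hypothetical two-entry merge table):
    # given bpe_ranks == {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first merges
    # ("l", "o") -> "lo", then ("lo", "w") -> "low", and returns "low".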
    def _tokenize(self, text):
        """Tokenize a string into a list of BPE token strings."""
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Convert a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index):
        """Convert an index (int) into a token (str) using the vocab."""
        return self.decoder.get(index )

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        text = ''.join(tokens )
        return bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8', errors=self.errors )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file, 'w', encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file, 'w', encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
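    # Resulting formats (editor addition; Longformer follows the RoBERTa convention):
    #   single sequence:   <s> X </s>
    #   pair of sequences: <s> A </s></s> B </s>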
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
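# Usage sketch (editor addition; assumes the checkpoint files are reachable,
# e.g. from the Hugging Face Hub):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tokenizer.tokenize("Hello world")       # -> ['Hello', 'Ġworld']
#   tokenizer("Hello world")["input_ids"]   # ids wrapped with <s> ... </s>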
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def A ( lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 0
while index < len(lowercase ) - 1:
UpperCamelCase = SYMBOLS[numerals[index]]
UpperCamelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
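# Example (editor addition):
#   parse_roman_numerals("MCMXC")  ->  1990   (1000 - 100 + 1000 - 10 + 100)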
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral representation of an integer."""
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
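# Examples (editor addition):
#   generate_roman_numerals(1990)  ->  "MCMXC"
#   generate_roman_numerals(3999)  ->  "MMMCMXCIX"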
def A ( lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = 0
with open(os.path.dirname(lowercase ) + roman_numerals_filename ) as filea:
UpperCamelCase = filea.readlines()
for line in lines:
UpperCamelCase = line.strip()
UpperCamelCase = parse_roman_numerals(lowercase )
UpperCamelCase = generate_roman_numerals(lowercase )
savings += len(lowercase ) - len(lowercase )
return savings
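# Note (editor addition): this solves Project Euler problem 89. With the official
# p089_roman.txt data file placed next to this script, the commonly reported
# answer is 743.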
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class for the Conditional DETR model."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
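# Usage sketch (editor addition): thanks to `attribute_map`, the generic config
# attribute names resolve to the DETR-specific ones.
#
#   config = ConditionalDetrConfig()
#   config.d_model              # 256
#   config.hidden_size          # 256 as well, routed through attribute_map
#   config.num_attention_heads  # 8, the encoder attention head count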
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 45 |
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
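# Note (editor addition): the recursion satisfies T(n) = 3 T(2n/3) + O(1), giving
# O(n^(log 3 / log 1.5)) ≈ O(n^2.71), slower even than bubble sort; the algorithm
# is of purely educational interest.
#
#   stooge_sort([4, 1, 3, 2])  ->  [1, 2, 3, 4]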
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted)) | 45 | 1 |
'''
Project Euler problem 86: find the least value of M such that the number of
cuboids, measuring no more than M on any side, whose shortest surface path
between opposite corners has integer length first exceeds the given limit.
'''
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
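# Note (editor addition): per the Project Euler 86 problem statement,
# solution(2_000) == 100; for the default limit of 1_000_000 the commonly
# reported answer is 1818.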
if __name__ == "__main__":
print(f'''{solution() = }''')
| 603 |
'''Tests for `transformers.dynamic_module_utils.get_imports`.'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__snake_case = """
import os
"""
__snake_case = """
def foo():
import os
return False
"""
__snake_case = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
__snake_case = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
except:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
__snake_case = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->int:
lowercase_ = os.path.join(SCREAMING_SNAKE_CASE_ , """test_file.py""" )
with open(SCREAMING_SNAKE_CASE_ , """w""" ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE_ )
lowercase_ = get_imports(SCREAMING_SNAKE_CASE_ )
assert parsed_imports == ["os"]
| 603 | 1 |