"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=None ) -> List[str]:
# Input as list
_a : Union[str, Any] = list(poly_a or [0] )[:]
_a : List[str] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_a : str = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_a : Any = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_a : Optional[int] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_a : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_a : List[str] = self.__multiply()
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[str] ) -> int:
_a : Any = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
#
_a : Union[str, Any] = self.c_max_length // 2
while next_ncol > 0:
_a : List[Any] = [[] for i in range(UpperCAmelCase__ )]
_a : Optional[int] = self.root**next_ncol
# First half of next step
_a : Any = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_a : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_a : List[Any] = new_dft
_a : Union[str, Any] = next_ncol // 2
return dft[0]
def _lowercase ( self : Dict ) -> Optional[int]:
_a : Union[str, Any] = self.__dft("""A""" )
_a : Dict = self.__dft("""B""" )
_a : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_a : str = 2
while next_ncol <= self.c_max_length:
_a : Any = [[] for i in range(UpperCAmelCase__ )]
_a : Optional[int] = self.root ** (next_ncol // 2)
_a : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_a : Dict = new_inverse_c
next_ncol *= 2
# Unpack
_a : Dict = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : str ) -> Any:
_a : List[Any] = """A = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
_a : Any = """B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
_a : Union[str, Any] = """A*B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
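# --- Usage sketch (editor addition for illustration; not part of the original file) ---
# Multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 1 + 2x gives A*B(x) = 1 + 4x + 7x^2 + 6x^3:
#
#   fft = FFT([1, 2, 3], [1, 2])
#   print(fft.product)  # [(1+0j), (4+0j), (7+0j), (6+0j)]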
"""simple docstring"""
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = 0
for i in range(1 , 1_0_0_1 ):
total += i**i
return str(UpperCamelCase__ )[-1_0:]
if __name__ == "__main__":
print(solution())
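# Quick check (editor addition): the well-known answer to Problem 48.
#   >>> solution()
#   '9110846700'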
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
def __lowerCAmelCase ( ) -> Node | None:
_UpperCamelCase : List[str] = Node(1 )
_UpperCamelCase : Optional[int] = Node(2 )
_UpperCamelCase : List[Any] = Node(3 )
_UpperCamelCase : List[str] = Node(4 )
_UpperCamelCase : int = Node(5 )
return tree
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> Sequence[Node | None]:
_UpperCamelCase : list[Any] = []
if root is None:
return output
_UpperCamelCase : List[str] = deque([root] )
while process_queue:
_UpperCamelCase : Tuple = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __lowerCAmelCase ( __lowerCAmelCase : Node | None , __lowerCAmelCase : int ) -> Sequence[Node | None]:
_UpperCamelCase : list[Any] = []
def populate_output(__lowerCAmelCase : Node | None , __lowerCAmelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__lowerCAmelCase , __lowerCAmelCase )
return output
def __lowerCAmelCase ( __lowerCAmelCase : Node | None , __lowerCAmelCase : int ) -> Sequence[Node | None]:
_UpperCamelCase : list[Any] = []
def populate_output(__lowerCAmelCase : Node | None , __lowerCAmelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__lowerCAmelCase , __lowerCAmelCase )
return output
def __lowerCAmelCase ( __lowerCAmelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_UpperCamelCase : list[Sequence[Node | None]] = []
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : Any = height(__lowerCAmelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCamelCase : Tuple = 1
else:
output.append(get_nodes_from_right_to_left(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCamelCase : List[str] = 0
return output
def __lowerCAmelCase ( ) -> None: # Main function for testing.
_UpperCamelCase : Dict = make_tree()
print(f"In-order Traversal: {inorder(__lowerCAmelCase )}" )
print(f"Pre-order Traversal: {preorder(__lowerCAmelCase )}" )
print(f"Post-order Traversal: {postorder(__lowerCAmelCase )}" , "\n" )
print(f"Height of Tree: {height(__lowerCAmelCase )}" , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(__lowerCAmelCase ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(__lowerCAmelCase ) + 1 ):
print(f"Level {level}:" , get_nodes_from_left_to_right(__lowerCAmelCase , level=__lowerCAmelCase ) )
print("\nZigZag order Traversal: " )
print(zigzag(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """vit_mae"""
def __init__(self , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=2_24 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=16 , lowerCAmelCase__=5_12 , lowerCAmelCase__=8 , lowerCAmelCase__=20_48 , lowerCAmelCase__=0.75 , lowerCAmelCase__=False , **lowerCAmelCase__ , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : Tuple = layer_norm_eps
_UpperCamelCase : Dict = image_size
_UpperCamelCase : Union[str, Any] = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Optional[int] = qkv_bias
_UpperCamelCase : List[str] = decoder_num_attention_heads
_UpperCamelCase : int = decoder_hidden_size
_UpperCamelCase : Dict = decoder_num_hidden_layers
_UpperCamelCase : Dict = decoder_intermediate_size
_UpperCamelCase : str = mask_ratio
_UpperCamelCase : List[str] = norm_pix_loss
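# --- Usage sketch (editor addition for illustration) ---
# Instantiate a default (vit-mae-base style) configuration and inspect it:
#
#   config = ViTMAEConfig()
#   print(config.hidden_size, config.mask_ratio)  # 768 0.75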
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
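# --- Usage sketch (editor addition for illustration) ---
# This (deprecated) subcommand is invoked through the transformers CLI:
#   transformers-cli add-new-model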
def optimal_merge_pattern(files: list) -> float:
    """Merge all sorted files with minimum total cost (greedy: always merge the two smallest)."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
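# Quick check (editor addition): merging [2, 3, 4] greedily costs (2 + 3) + (5 + 4) = 14.
#   >>> optimal_merge_pattern([2, 3, 4])
#   14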
"""Project Euler Problem 26: find the value of d < 1000 for which 1/d
contains the longest recurring cycle in its decimal fraction part."""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
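# Quick check (editor addition): the well-known answer for d < 1000.
#   >>> solution()
#   983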
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
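# --- Usage sketch (editor addition for illustration) ---
# A DiagonalGaussianDistribution is built from a tensor holding the concatenated
# mean and log-variance along the channel axis:
#
#   params = torch.randn(1, 8, 32, 32)       # 4 mean + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   latents = dist.sample()                  # shape (1, 4, 32, 32)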
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the square of the difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
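# --- Usage sketch (editor addition for illustration; the script filename below is hypothetical) ---
#   python convert_vae_decoder_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14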
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
"""Implementation of an auto-balancing AVL binary search tree."""
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    """An auto-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `~transformers.BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
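# --- Usage sketch (editor addition for illustration) ---
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   print(config.pruning_method)  # "topK"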
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
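# --- Usage sketch (editor addition for illustration) ---
# From a shell, this subcommand is invoked through the accelerate CLI:
#   accelerate env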
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak RSS of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """Snapshot the current time and CPU/GPU memory, and start tracking the CPU peak."""
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """Return the elapsed time and the CPU/GPU memory deltas (in MiB) since `start_measures`."""
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """Pretty-print a measurement dictionary produced by `end_measure`."""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
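# Usage sketch (a minimal example; CUDA is optional -- the GPU loops above simply
# run zero times on CPU-only machines):
#
#     start = start_measure()
#     layer = torch.nn.Linear(1024, 1024)  # any workload to profile
#     measures = end_measure(start)
#     log_measures(measures, "Linear layer init")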
| 704 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 51 | 0 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News item by its id."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
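# Output sketch (the titles and urls below are invented placeholders, not real
# data): the markdown built above looks like
#
#     * [Example story title](https://example.com/article)
#     * [Another front-page story](https://example.org/post)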
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 296 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 296 | 1 |
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
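# Examples (approximate values, shown for illustration):
#     ind_reactance(35e-3, 1e3, 0)  -> {'reactance': 219.91...}   # X = 2*pi*f*L
#     ind_reactance(0, 1e3, 250)    -> {'inductance': 0.0397...}  # L = X / (2*pi*f)
#     ind_reactance(35e-3, 0, 250)  -> {'frequency': 1136.8...}   # f = X / (2*pi*L)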
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 425 | 0 |
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 238 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_2 - output_3)) > 1e-3
        assert np.sum(np.abs(output_3 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 620 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: -1 if `number` has an odd number of
    prime factors (counted with multiplicity), +1 if it has an even number."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
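# Examples: 10 = 2 * 5 has an even number of prime factors, 11 has an odd number.
#     liouville_lambda(10) -> 1
#     liouville_lambda(11) -> -1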
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
        help="""Whether to use relative position embeddings or not. Defaults to False.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
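# Invocation sketch (the script name and all paths below are illustrative):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output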
| 614 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
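# Launch sketch (illustrative; the file name is an assumption -- use whatever this
# script is saved as):
#
#     accelerate launch tracking.py --with_tracking --mixed_precision fp16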
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 50 |
def perfect(number: int) -> bool:
    """A perfect number equals the sum of its positive divisors excluding itself."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
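# Examples: 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, 12 is not
# (its proper divisors sum to 1 + 2 + 3 + 4 + 6 = 16).
#     perfect(6)  -> True
#     perfect(28) -> True
#     perfect(12) -> False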
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
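# Usage sketch (minimal; in practice `datasets` selects this formatter for you
# when you request the "torch" format):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # -> tensor([1, 2])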
| 400 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 400 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 592 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs (checked via the XOR sign bit)."""
    return num1 ^ num2 < 0
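# Examples: the XOR of two ints is negative exactly when their sign bits differ.
#     different_signs(1, -1)   -> True
#     different_signs(80, 340) -> False
#     different_signs(-2, 2)   -> True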
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict for the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load the fairseq/metaseq weights into an HF `OPTModel` and save it."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
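# Invocation sketch (the script name, paths and config value below are
# illustrative):
#
#     python convert_opt_original_checkpoint_to_pytorch.py \
#         --fairseq_path /path/to/model.pt \
#         --pytorch_dump_folder_path /path/to/output \
#         --hf_config facebook/opt-350m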
| 711 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers representing two points in the same n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of `manhattan_distance`."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
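# Examples:
#     manhattan_distance([1, 1], [2, 2])            -> 2.0
#     manhattan_distance_one_liner([1, 3], [4, -1]) -> 7.0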
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serialize this instance to a Python dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
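
# Minimal usage sketch (assumed API, mirroring other HF composite configs):
#   config = GitConfig(vision_config={"image_size": 384})
#   config.vision_config.image_size   # -> 384
#   config.to_dict()["model_type"]    # -> "git"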
| 17 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit (0 for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
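
# Examples (the function name above is our reconstruction of the obfuscated original):
#   get_highest_set_bit_position(0)  -> 0
#   get_highest_set_bit_position(8)  -> 4   # 0b1000: highest set bit is the 4th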
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 1 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    """Drop top-level entries we never convert."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    """Rename every checkpoint key according to WHISPER_MAPPING."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
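
# For instance (illustrative key, not taken from a real checkpoint):
#   "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"
# via the "blocks" -> "layers" and "mlp.0" -> "fc1" substitutions above.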
def make_linear_from_emb(emb):
    """Build an output projection whose weight is tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
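
# Design note: tying the LM head to the token embedding avoids duplicating the
# vocab-sized projection and matches how the original Whisper decoder scores
# tokens against its embedding matrix.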
def _download(url: str, root: str = ".") -> bytes:
    """Download `url` into `root`, verifying the SHA256 embedded in the URL.

    The default `root` is an assumption; the obfuscated source did not preserve it.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a HF `WhisperForConditionalGeneration`."""
    if ".pt" not in checkpoint_path:
        # `_download` returns raw bytes; deserializing them through a BytesIO
        # buffer is our assumption for how the original script loaded them.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 721 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to their fast (tokenizers-backed) equivalents."""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 394 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval dataloaders for GLUE MRPC with length-aware padding."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 175 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Net present value of a series of cash flows at the given discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)
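
# Worked example: an outlay of 100 followed by three inflows of 50 at a 10%
# discount rate:
#   present_value(0.10, [-100, 50, 50, 50])
#   = -100 + 50/1.1 + 50/1.1**2 + 50/1.1**3 ≈ 24.34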
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 |
def solution() -> int:
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
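
# For reference, the digits picked out are d1..d1000000 of the concatenation
# "123456789101112..."; the product evaluates to 210, the known answer to
# Project Euler problem 40.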
if __name__ == "__main__":
print(solution())
| 653 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 681 |
def is_palindrome(num: int) -> bool:
    """Check whether an integer reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
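
# Examples (the function name above is our reconstruction of the obfuscated original):
#   is_palindrome(121)  -> True
#   is_palindrome(-11)  -> False
#   is_palindrome(10)   -> False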
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `DetrConfig` from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
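
# Minimal usage sketch (the class names above are our reconstruction of the
# obfuscated originals):
#   config = DetrConfig(num_queries=50)
#   config.hidden_size             # -> 256, aliased to `d_model` via attribute_map
#   DetrOnnxConfig(config).inputs  # -> OrderedDict of pixel_values / pixel_mask axes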
| 704 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Map every byte value to a printable unicode character for reversible byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
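
# For instance:
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}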
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
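
# Minimal usage sketch (hypothetical local files; the class name above is our
# reconstruction of the obfuscated original):
#   tokenizer = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   tokenizer.tokenize("Hello world")  # byte-level BPE pieces such as ["Hello", "Ġworld"]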
| 514 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # note: sliced from `rgb`, matching the recorded expectations

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 98 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Dict = """
from transformers import pipeline
"""
UpperCamelCase_: Union[str, Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
UpperCamelCase_: Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
UpperCamelCase_: Optional[Any] = self.get_env()
UpperCamelCase_: int = """1"""
UpperCamelCase_: int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
UpperCamelCase_: Any = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = """
from transformers import AutoModel
"""
UpperCamelCase_: Dict = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
UpperCamelCase_: Optional[int] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
UpperCamelCase_: Optional[Any] = self.get_env()
UpperCamelCase_: int = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCamelCase_: Union[str, Any] = """1"""
UpperCamelCase_: List[str] = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 548 | 0 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__lowercase = 6378137.0
__lowercase = 6356752.314245
__lowercase = 6_37_81_37
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Any =(AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowerCAmelCase_ : Any =atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase_ : str =atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowerCAmelCase_ : str =haversine_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowerCAmelCase_ : int =(b_lata + b_lata) / 2
lowerCAmelCase_ : Optional[int] =(b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
lowerCAmelCase_ : Optional[int] =(sin(_SCREAMING_SNAKE_CASE ) ** 2) * (cos(_SCREAMING_SNAKE_CASE ) ** 2)
lowerCAmelCase_ : Union[str, Any] =cos(sigma / 2 ) ** 2
    lowerCAmelCase_ : Any =(sigma - sin(_SCREAMING_SNAKE_CASE )) * (x_numerator / x_denominator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
lowerCAmelCase_ : Optional[int] =(cos(_SCREAMING_SNAKE_CASE ) ** 2) * (sin(_SCREAMING_SNAKE_CASE ) ** 2)
lowerCAmelCase_ : Optional[int] =sin(sigma / 2 ) ** 2
lowerCAmelCase_ : int =(sigma + sin(_SCREAMING_SNAKE_CASE )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
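# Usage sketch: in the original TheAlgorithms module this function is
# `lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2)` with coordinates
# in decimal degrees and the result in meters.
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # roughly 254 km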
| 305 |
'''simple docstring'''
import functools
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Validation
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
lowerCAmelCase_ : List[str] =set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
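    # Spot checks against the LeetCode 983 examples (in the original source
    # this function is `mincost_tickets(days, costs)`):
    #   days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11
    #     (1-day pass on day 1, 7-day pass covering days 4-8, 1-day pass on day 20)
    #   days=[2, 7, 15],          costs=[2, 7, 15] -> 6  (three 1-day passes)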
| 305 | 1 |
"""simple docstring"""
from math import factorial
def _snake_case ( UpperCamelCase : int = 100 ):
    return sum(int(x) for x in str(factorial(UpperCamelCase)))
if __name__ == "__main__":
    # digit sum of 100! is 648 (Project Euler #20)
    print(_snake_case(int(input("Enter the Number: ").strip())))
| 160 |
"""simple docstring"""
def or_gate(input_a: int, input_b: int) -> int:
    """Logical OR gate: return 1 if either input is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 134 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ : Tuple = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__magic_name__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
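# Minimal sketch of what `_LazyModule` provides (an illustration under stated
# assumptions, not the transformers implementation): attribute access triggers
# the real import, so importing the package stays cheap until a symbol is used.
import importlib
from types import ModuleType


class _LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule}
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # defer the heavy import until the attribute is first touched
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)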
| 608 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__snake_case )
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCAmelCase_ = Features({"""text""": Value("""string""" )} )
lowerCAmelCase_ = Features({"""labels""": ClassLabel} )
lowerCAmelCase_ = "text"
lowerCAmelCase_ = "labels"
def lowercase_ ( self , __UpperCamelCase ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
A_ = copy.deepcopy(self )
A_ = self.label_schema.copy()
A_ = features[self.label_column]
A_ = label_schema
return task_template
@property
def lowercase_ ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
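# Usage sketch (assumes the Hugging Face `datasets` library, where this class
# is `TextClassification`; column names are illustrative):
#   from datasets import ClassLabel, Features, Value
#   from datasets.tasks import TextClassification
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)  # copies the real ClassLabel into the schema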
| 608 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCAmelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
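# Sanity notes for the two helpers above (named `bytes_to_unicode` and
# `get_pairs` in the original BART tokenizer they are copied from):
#   bytes_to_unicode()[ord("A")] == "A"        # printable bytes map to themselves
#   bytes_to_unicode()[ord(" ")] == "\u0120"   # space is shifted past U+0100 -> 'Ġ'
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}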
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["input_ids", "attention_mask"]
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> List[Any]:
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle:
_snake_case = json.load(UpperCAmelCase )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding="""utf-8""" ) as merges_handle:
_snake_case = merges_handle.read().split("""\n""" )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase (self ) -> List[str]:
return len(self.encoder )
def lowercase (self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase (self , UpperCAmelCase ) -> List[Any]:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(UpperCAmelCase )
_snake_case = get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
_snake_case = min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case, _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(UpperCAmelCase ):
try:
_snake_case = word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(UpperCAmelCase )
_snake_case = new_word
if len(UpperCAmelCase ) == 1:
break
else:
_snake_case = get_pairs(UpperCAmelCase )
_snake_case = """ """.join(UpperCAmelCase )
_snake_case = word
return word
def lowercase (self , UpperCAmelCase ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , UpperCAmelCase ):
_snake_case = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def lowercase (self , UpperCAmelCase ) -> List[str]:
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
return self.decoder.get(UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = """""".join(UpperCAmelCase )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + """\n""" )
_snake_case = 0
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
_snake_case = token_index
writer.write(""" """.join(UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase (self , UpperCAmelCase , UpperCAmelCase=False , **UpperCAmelCase ) -> List[Any]:
_snake_case = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
_snake_case = """ """ + text
return (text, kwargs)
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase = None , UpperCAmelCase = None , ) -> dict:
_snake_case = super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_snake_case = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_snake_case = len(encoded_inputs["""global_attention_mask"""] ) != len(UpperCAmelCase )
if needs_to_be_padded:
_snake_case = len(UpperCAmelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_snake_case = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_snake_case = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
| 585 |
'''simple docstring'''
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase ) -> Any:
_snake_case = name
_snake_case = val
def __str__(self ) -> List[str]:
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__(self , UpperCAmelCase ) -> Any:
return self.val < other.val
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase ) -> Dict:
_snake_case = {}
_snake_case = {}
_snake_case = self.build_heap(UpperCAmelCase )
def __getitem__(self , UpperCAmelCase ) -> Union[str, Any]:
return self.get_value(UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> Dict:
return (idx - 1) // 2
def lowercase (self , UpperCAmelCase ) -> Optional[Any]:
return idx * 2 + 1
def lowercase (self , UpperCAmelCase ) -> Optional[int]:
return idx * 2 + 2
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
return self.heap_dict[key]
def lowercase (self , UpperCAmelCase ) -> str:
_snake_case = len(UpperCAmelCase ) - 1
_snake_case = self.get_parent_idx(UpperCAmelCase )
for idx, i in enumerate(UpperCAmelCase ):
_snake_case = idx
_snake_case = i.val
for i in range(UpperCAmelCase , -1 , -1 ):
self.sift_down(UpperCAmelCase , UpperCAmelCase )
return array
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
while True:
_snake_case = self.get_left_child_idx(UpperCAmelCase ) # noqa: E741
_snake_case = self.get_right_child_idx(UpperCAmelCase )
_snake_case = idx
if l < len(UpperCAmelCase ) and array[l] < array[idx]:
_snake_case = l
if r < len(UpperCAmelCase ) and array[r] < array[smallest]:
_snake_case = r
if smallest != idx:
_snake_case, _snake_case = array[smallest], array[idx]
                _snake_case, _snake_case = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
_snake_case = smallest
else:
break
def lowercase (self , UpperCAmelCase ) -> str:
_snake_case = self.get_parent_idx(UpperCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
_snake_case, _snake_case = self.heap[idx], self.heap[p]
_snake_case, _snake_case = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_snake_case = p
_snake_case = self.get_parent_idx(UpperCAmelCase )
def lowercase (self ) -> Optional[int]:
return self.heap[0]
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.heap[-1], self.heap[0]
_snake_case, _snake_case = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_snake_case = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase (self , UpperCAmelCase ) -> List[str]:
self.heap.append(UpperCAmelCase )
_snake_case = len(self.heap ) - 1
_snake_case = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase (self ) -> int:
return len(self.heap ) == 0
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
_snake_case = new_value
_snake_case = new_value
self.sift_up(self.idx_of_element[node] )
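# Design note: `idx_of_element` maps each Node to its current position in
# `heap`, so `decrease_key` can locate a node in O(1) and then restore the
# heap property with sift_up in O(log n); without the map, finding the node
# would already cost O(n).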
__lowerCAmelCase = Node('R', -1)
__lowerCAmelCase = Node('B', 6)
__lowerCAmelCase = Node('A', 3)
__lowerCAmelCase = Node('X', 1)
__lowerCAmelCase = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__lowerCAmelCase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 585 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase_ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
UpperCamelCase_ = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
UpperCamelCase_ = '''|'''.join(sys.argv[1:])
UpperCamelCase_ = re.compile(RF"^({joined_dirs}).*?\.py$")
UpperCamelCase_ = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 322 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
'''simple docstring'''
A__ : Any = BlenderbotConfig
A__ : List[str] = {}
A__ : Tuple = "gelu"
def __init__( self: List[str] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]=13 ,lowerCamelCase_: List[str]=7 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: List[Any]=99 ,lowerCamelCase_: Dict=32 ,lowerCamelCase_: str=2 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: List[Any]=37 ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: List[Any]=0.1 ,lowerCamelCase_: Optional[int]=20 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: str=1 ,lowerCamelCase_: Any=0 ,) -> List[str]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = eos_token_id
UpperCAmelCase_ : Union[str, Any] = pad_token_id
UpperCAmelCase_ : str = bos_token_id
def A__ ( self: Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Dict = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : int = prepare_blenderbot_inputs_dict(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return config, inputs_dict
def A__ ( self: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[str] = TFBlenderbotModel(config=lowerCamelCase_ ).get_decoder()
UpperCAmelCase_ : List[str] = inputs_dict["""input_ids"""]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Dict = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase_ : str = inputs_dict["""head_mask"""]
UpperCAmelCase_ : str = 1
# first forward pass
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,head_mask=lowerCamelCase_ ,use_cache=lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
UpperCAmelCase_ : Dict = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append to next input_ids and attention_mask
UpperCAmelCase_ : List[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Optional[int] = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ )[0]
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : List[Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-3 )
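        # What the assertion above verifies: decoding one new step with
        # `past_key_values` must match (within rtol=1e-3) the logits obtained by
        # re-running the full concatenated sequence, i.e. the KV cache is consistent.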
def lowerCamelCase_ ( _a : Any , _a : Tuple , _a : Any , _a : Optional[int]=None , _a : int=None , _a : int=None , _a : int=None , _a : Dict=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ : Optional[Any] = tf.cast(tf.math.not_equal(_a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Union[str, Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
A__ : Optional[int] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
A__ : Optional[Any] = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
A__ : int = True
A__ : Optional[int] = False
A__ : Union[str, Any] = False
def A__ ( self: str ) -> List[Any]:
UpperCAmelCase_ : str = TFBlenderbotModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Any:
self.config_tester.run_common_tests()
def A__ ( self: str ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : int = ["My friends are cool but they eat too many carbs."]
A__ : Optional[int] = "facebook/blenderbot-400M-distill"
@cached_property
def A__ ( self: Optional[Any] ) -> Optional[int]:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def A__ ( self: Tuple ) -> List[str]:
UpperCAmelCase_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def A__ ( self: Dict ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(self.src_text ,return_tensors="""tf""" )
UpperCAmelCase_ : Tuple = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=lowerCamelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 322 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a__ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : int , *a__ : Union[str, Any] , **a__ : Any ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , a__ , )
super().__init__(*a__ , **a__ )
| 51 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __a ( lowerCAmelCase_ : int = 8 ) -> str:
'''simple docstring'''
UpperCAmelCase_= ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
i -= len(lowerCAmelCase_ )
UpperCAmelCase_= i // 3
UpperCAmelCase_= i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCAmelCase_= (
chars_incl
+ random(lowerCAmelCase_ ,quotient + remainder )
+ random(lowerCAmelCase_ ,lowerCAmelCase_ )
+ random(lowerCAmelCase_ ,lowerCAmelCase_ )
)
UpperCAmelCase_= list(lowerCAmelCase_ )
shuffle(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass # Put your code here...
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
pass # Put your code here...
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : List[str] ) -> str:
'''simple docstring'''
pass # Put your code here...
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : int = 8 ) -> bool:
'''simple docstring'''
if len(lowerCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase_= any(char in ascii_uppercase for char in password )
UpperCAmelCase_= any(char in ascii_lowercase for char in password )
UpperCAmelCase_= any(char in digits for char in password )
UpperCAmelCase_= any(char in punctuation for char in password )
return upper and lower and num and spec_char
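# Example session (a sketch -- outputs vary because `secrets.choice` is
# cryptographically random; function names follow the original module):
#   password_generator(12)            -> e.g. 'k#8Qv!2mZr@x'
#   is_strong_password('Hw3!aaaa')    -> True  (has upper, lower, digit, special)
#   is_strong_password('password')    -> False (no uppercase, digit or special char)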
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def __a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_= int(input("""Please indicate the max length of your password: """ ).strip() )
UpperCAmelCase_= input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" ,password_generator(lowerCAmelCase_ ) )
print(
"""Alternative Password generated:""" ,alternative_password_generator(lowerCAmelCase_ ,lowerCAmelCase_ ) ,)
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 593 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _snake_case (UpperCamelCase_):
__A : List[Any] ="WhisperFeatureExtractor"
__A : Optional[int] ="WhisperTokenizer"
def __init__( self ,_snake_case ,_snake_case ):
super().__init__(__a ,__a )
UpperCAmelCase_ : Optional[Any] = self.feature_extractor
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ,_snake_case=None ,_snake_case=None ,_snake_case=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__a ,language=__a ,no_timestamps=__a )
def __call__( self ,*_snake_case ,**_snake_case ):
if self._in_target_context_manager:
return self.current_processor(*__a ,**__a )
UpperCAmelCase_ : str = kwargs.pop("audio" ,__a )
UpperCAmelCase_ : Tuple = kwargs.pop("sampling_rate" ,__a )
UpperCAmelCase_ : List[Any] = kwargs.pop("text" ,__a )
if len(__a ) > 0:
UpperCAmelCase_ : str = args[0]
UpperCAmelCase_ : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
UpperCAmelCase_ : int = self.feature_extractor(__a ,*__a ,sampling_rate=__a ,**__a )
if text is not None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(__a ,**__a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ : Optional[int] = encodings['input_ids']
return inputs
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.batch_decode(*__a ,**__a )
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
return self.tokenizer.decode(*__a ,**__a )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="np" ):
return self.tokenizer.get_prompt_ids(__a ,return_tensors=__a )
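# Usage sketch (assumes the `transformers` library and a public Whisper
# checkpoint; `waveform` stands for a 1-D float array of 16 kHz audio):
#   from transformers import WhisperProcessor
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   forced_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe")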
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : int =None
@experimental
def snake_case_ (__A : Union[str, Any] , __A : Any , __A : Tuple , __A : Dict , __A : List[str] , __A : Dict , __A : Union[str, Any] ) -> Dict:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__A , __A , __A , __A , __A , __A , __A )
return _map_with_joblib(__A , __A , __A , __A , __A , __A , __A )
def snake_case_ (__A : Optional[int] , __A : Any , __A : Dict , __A : List[str] , __A : List[Any] , __A : Union[str, Any] , __A : Tuple ) -> Dict:
__lowerCAmelCase : Tuple = num_proc if num_proc <= len(__A ) else len(__A )
    __lowerCAmelCase : Any = [] # We organize the splits ourselves (contiguous splits)
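    # contiguous split example: 10 items over 3 procs -> slices of 4, 3, 3
    # (index 0 gets [0, 4), index 1 gets [4, 7), index 2 gets [7, 10))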
for index in range(__A ):
__lowerCAmelCase : List[str] = len(__A ) // num_proc
__lowerCAmelCase : str = len(__A ) % num_proc
__lowerCAmelCase : Dict = div * index + min(__A , __A )
__lowerCAmelCase : Optional[Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__A ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(__A )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(__A )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
__lowerCAmelCase ,__lowerCAmelCase : Union[str, Any] = None, None
if not disable_tqdm:
__lowerCAmelCase ,__lowerCAmelCase : Any = (RLock(),), tqdm.set_lock
with Pool(__A , initargs=__A , initializer=__A ) as pool:
__lowerCAmelCase : Any = pool.map(__A , __A )
logger.info(f'''Finished {num_proc} processes''' )
__lowerCAmelCase : Union[str, Any] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(__A )} objects''' )
return mapped
def snake_case_ (__A : Tuple , __A : List[Any] , __A : int , __A : str , __A : Tuple , __A : Tuple , __A : Optional[Any] ) -> Any:
    # progress bars are not yet supported for _map_with_joblib: tqdm cannot be applied
    # accurately under joblib without monkey-patching joblib internals, which are subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__A ):
return joblib.Parallel()(
joblib.delayed(__A )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def snake_case_ (__A : str ) -> int:
__lowerCAmelCase : Any = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
__lowerCAmelCase : Union[str, Any] = None
| 651 |
import math
def snake_case_ (__A : int = 1_0_0 ) -> int:
__lowerCAmelCase : List[str] = sum(i * i for i in range(1 , n + 1 ) )
__lowerCAmelCase : int = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
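# For the default n=100 this evaluates to 25_164_150 (Project Euler #6):
# (1 + ... + 100)^2 - (1^2 + ... + 100^2) = 25_502_500 - 338_350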
if __name__ == "__main__":
    print(F'{snake_case_() = }')
| 651 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 717 |
def snake_case ( UpperCAmelCase : Optional[int], UpperCAmelCase : Union[str, Any] ):
A = ''
for i in table:
res += inp[i - 1]
return res
def snake_case ( UpperCAmelCase : Union[str, Any] ):
return data[1:] + data[0]
def snake_case ( UpperCAmelCase : Union[str, Any], UpperCAmelCase : Dict ):
A = ''
for i in range(len(UpperCAmelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def snake_case ( UpperCAmelCase : int, UpperCAmelCase : Optional[Any] ):
A = int('0b' + data[0] + data[-1], 2 )
A = int('0b' + data[1:3], 2 )
return bin(s[row][col] )[2:]
def snake_case ( UpperCAmelCase : Optional[Any], UpperCAmelCase : Any, UpperCAmelCase : Union[str, Any], UpperCAmelCase : Optional[Any], UpperCAmelCase : Optional[int] ):
A = message[:4]
A = message[4:]
A = apply_table(UpperCAmelCase, UpperCAmelCase )
A = xor(UpperCAmelCase, UpperCAmelCase )
A = apply_sbox(UpperCAmelCase, temp[:4] ) # noqa: E741
A = apply_sbox(UpperCAmelCase, temp[4:] )
A = '0' * (2 - len(UpperCAmelCase )) + l # noqa: E741
A = '0' * (2 - len(UpperCAmelCase )) + r
A = apply_table(l + r, UpperCAmelCase )
A = xor(UpperCAmelCase, UpperCAmelCase )
return temp + right
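# One Feistel round of simplified DES: expand/permute the right half with the
# expansion table, XOR with the round key, substitute the two 4-bit halves
# through the S-boxes, permute with p4, XOR into the left half; the right
# half passes through unchanged.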
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter 10 bit key: ')
lowerCAmelCase_ = input('Enter 8 bit message: ')
lowerCAmelCase_ = [6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase_ = [2, 4, 3, 1]
lowerCAmelCase_ = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase_ = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase_ = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase_ = apply_table(key, paa_table)
lowerCAmelCase_ = temp[:5]
lowerCAmelCase_ = temp[5:]
lowerCAmelCase_ = left_shift(left)
lowerCAmelCase_ = left_shift(right)
lowerCAmelCase_ = apply_table(left + right, pa_table)
lowerCAmelCase_ = left_shift(left)
lowerCAmelCase_ = left_shift(right)
lowerCAmelCase_ = left_shift(left)
lowerCAmelCase_ = left_shift(right)
lowerCAmelCase_ = apply_table(left + right, pa_table)
# encryption
lowerCAmelCase_ = apply_table(message, IP)
lowerCAmelCase_ = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ = temp[4:] + temp[:4]
lowerCAmelCase_ = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowerCAmelCase_ = apply_table(CT, IP)
lowerCAmelCase_ = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ = temp[4:] + temp[:4]
lowerCAmelCase_ = function(expansion, sa, sa, keya, temp)
lowerCAmelCase_ = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 110 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCamelCase__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self : Optional[int] , lowercase__ : Dict=None , **lowercase__ : List[Any] ):
super().__init__(features=lowercase__ )
_lowerCAmelCase = torch_tensor_kwargs
import torch # noqa import torch at initialization
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Union[str, Any] ):
import torch
if isinstance(lowercase__ , lowercase__ ) and column:
if all(
isinstance(lowercase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowercase__ )
return column
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : Union[str, Any] ):
import torch
if isinstance(lowercase__ , (str, bytes, type(lowercase__ )) ):
return value
elif isinstance(lowercase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_lowerCAmelCase = {}
if isinstance(lowercase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
_lowerCAmelCase = {'dtype': torch.intaa}
elif isinstance(lowercase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_lowerCAmelCase = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowercase__ , PIL.Image.Image ):
_lowerCAmelCase = np.asarray(lowercase__ )
return torch.tensor(lowercase__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : List[str] ):
import torch
# support for torch, tf, jax etc.
if hasattr(lowercase__ , '__array__' ) and not isinstance(lowercase__ , torch.Tensor ):
_lowerCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowercase__ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowercase__ ) for substruct in data_struct] )
elif isinstance(lowercase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowercase__ ) for substruct in data_struct] )
return self._tensorize(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : dict ):
return map_nested(self._recursive_tensorize , lowercase__ , map_list=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : pa.Table ):
_lowerCAmelCase = self.numpy_arrow_extractor().extract_row(lowercase__ )
_lowerCAmelCase = self.python_features_decoder.decode_row(lowercase__ )
return self.recursive_tensorize(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : pa.Table ):
_lowerCAmelCase = self.numpy_arrow_extractor().extract_column(lowercase__ )
_lowerCAmelCase = self.python_features_decoder.decode_column(lowercase__ , pa_table.column_names[0] )
_lowerCAmelCase = self.recursive_tensorize(lowercase__ )
_lowerCAmelCase = self._consolidate(lowercase__ )
return column
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : pa.Table ):
_lowerCAmelCase = self.numpy_arrow_extractor().extract_batch(lowercase__ )
_lowerCAmelCase = self.python_features_decoder.decode_batch(lowercase__ )
_lowerCAmelCase = self.recursive_tensorize(lowercase__ )
for column_name in batch:
_lowerCAmelCase = self._consolidate(batch[column_name] )
return batch
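# Usage sketch (assumes the `datasets` library, where this formatter backs
# `Dataset.with_format("torch")`):
#   ds = ds.with_format("torch")
#   ds[0]["input_ids"]  # -> torch.Tensor (integer columns default to torch.int64)
#   ds[0]["score"]      # -> torch.Tensor (float columns default to torch.float32)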
| 192 |
import argparse
from collections import defaultdict
import yaml
_lowercase: List[Any] = '''docs/source/en/_toctree.yml'''
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = defaultdict(snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(snake_case )
_lowerCAmelCase = new_doc_list
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(snake_case ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
_lowerCAmelCase = sorted(snake_case , key=lambda snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(snake_case ) > 1:
        raise ValueError("The doc list has two 'overview' docs, which is not allowed." )
overview_doc.extend(snake_case )
# Sort
return overview_doc
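# Example: [{'local': 'x', 'title': 'Overview'}, {'local': 'b', 'title': 'B'},
# {'local': 'b', 'title': 'B'}] collapses the duplicate 'b' entry, sorts the
# remaining entries case-insensitively by title, and keeps 'Overview' first.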
def _lowerCamelCase ( snake_case=False ):
with open(snake_case , encoding='utf-8' ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]['sections']
    # Then to the scheduler doc
_lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase = api_doc[scheduler_idx]['sections']
_lowerCAmelCase = clean_doc_toc(snake_case )
_lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
            raise ValueError(
                'The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
def _lowerCamelCase ( snake_case=False ):
with open(snake_case , encoding='utf-8' ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase = False
_lowerCAmelCase = api_doc[pipeline_idx]['sections']
_lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase = pipeline_doc['section']
_lowerCAmelCase = clean_doc_toc(snake_case )
if overwrite:
_lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case )
# sort overall pipeline doc
_lowerCAmelCase = clean_doc_toc(snake_case )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
            raise ValueError(
                'The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_lowercase: Tuple = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase: Optional[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 192 | 1 |
'''simple docstring'''
import baseaa
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.aaaencode(string.encode('''utf-8''' ) )
def __snake_case ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.aaadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
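# Round-trip sketch using the stdlib calls the obfuscated names above stand
# for (`baseaa.aaaencode` is `base64.a85encode`):
import base64

assert base64.a85encode("Hello".encode("utf-8")) == b"87cURDZ"
assert base64.a85decode(b"87cURDZ").decode("utf-8") == "Hello"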
if __name__ == "__main__":
import doctest
doctest.testmod()
| 570 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
a__ : Tuple = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
a__ : List[str] = 'UperNetConfig'
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , a__ : int , a__ : int , a__ : Union[int, Tuple[int, int]] , a__ : Union[int, Tuple[int, int], str] = 0 , a__ : bool = False , a__ : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
UpperCAmelCase = nn.Convad(
in_channels=a__ , out_channels=a__ , kernel_size=a__ , padding=a__ , bias=a__ , dilation=a__ , )
UpperCAmelCase = nn.BatchNormad(a__ )
UpperCAmelCase = nn.ReLU()
def __snake_case ( self : Optional[int] , a__ : torch.Tensor ):
UpperCAmelCase = self.conv(a__ )
UpperCAmelCase = self.batch_norm(a__ )
UpperCAmelCase = self.activation(a__ )
return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSP) applied on the last backbone feature map."""
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Decode head: PSP module on the top feature map plus an FPN over the rest."""
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners)
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1)
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1)
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Auxiliary FCN head applied on an intermediate backbone feature map."""
    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def init_weights(self):
        self.apply(self._init_weights)
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", UPERNET_START_DOCSTRING, )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one')
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
| 570 | 1 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
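# Quick demonstration (illustrative): the default sentence is a pangram, while
# a short sentence missing most letters is not.
def _demo_pangram():
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")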
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 531 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
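# Usage sketch (not part of the original module): resolve an activation by name.
def _demo_get_activation():
    act = get_activation("gelu")
    assert isinstance(act, nn.GELU)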
| 531 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center_pixel)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 704 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class ASTFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 474 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
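# Illustrative usage sketch (not part of the original tests): combine several
# criteria so generation stops at 20 tokens or after 0.5 seconds, whichever
# comes first.
def _demo_stopping_criteria():
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=0.5)])
    input_ids = ids_tensor((1, 20), 100)
    scores = torch.ones((1, 20), dtype=torch.float)
    assert criteria(input_ids, scores)  # 20 tokens hits the MaxLengthCriteria bound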
| 374 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 374 | 1 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
SCREAMING_SNAKE_CASE__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
SCREAMING_SNAKE_CASE__ = """main"""
# Default branch name
SCREAMING_SNAKE_CASE__ = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
SCREAMING_SNAKE_CASE__ = """aaaaaaa"""
# This commit does not exist, so we should 404.
SCREAMING_SNAKE_CASE__ = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
SCREAMING_SNAKE_CASE__ = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> str:
print('Bonjour!' )
yield
print('Au revoir!' )
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])
        # find_labels detects the framework through inheritance, so subclasses work too
        class DummyModel(BertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['labels'])
@require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])
        class DummyModel(TFBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ['labels'])
@require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
        class DummyModel(FlaxBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), [])
| 708 |
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
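# Worked example (illustrative): with this 1-indexed convention the function
# returns the (n-1)-th Catalan number, so catalan(1..6) yields 1, 1, 2, 5, 14, 42.
def _demo_catalan():
    assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]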
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any] , __A :str , __A :str , __A :Optional[Any] , __A :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDebertaVaModel(config=_A )
SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
SCREAMING_SNAKE_CASE__ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self :Optional[Any] , __A :int , __A :List[str] , __A :str , __A :Tuple , __A :List[Any] , __A :Dict , __A :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDebertaVaForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :int , __A :str , __A :str , __A :Union[str, Any] , __A :int , __A :Optional[Any] , __A :str , __A :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFDebertaVaForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self :List[Any] , __A :List[str] , __A :Tuple , __A :int , __A :int , __A :Tuple , __A :Tuple , __A :int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFDebertaVaForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :Dict , __A :Any , __A :Union[str, Any] , __A :List[str] , __A :Dict , __A :Tuple , __A :Any , __A :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDebertaVaForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        self.assertIsNotNone(model)
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 6 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
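# Illustrative call (hypothetical argument names): warn that `scale` is
# deprecated in favor of `gamma` starting with version 1.0.0, returning the
# legacy value popped from `kwargs` so the caller can keep honoring it.
#   scale = deprecate("scale", "1.0.0", "Use `gamma` instead.", take_from=kwargs)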
| 10 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    """Preprocess the data once (tokenization + token_to_ids) and pickle the result."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
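# Example invocation (script and file names are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text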
if __name__ == "__main__":
main()
| 568 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 568 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = 'encoder-decoder'
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
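# Illustrative usage sketch (component configs are examples only, and this
# relies on a full transformers checkout because of the relative import):
#   from transformers import BertConfig, GPT2Config
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#   assert config.is_encoder_decoder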
| 632 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
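# Illustrative check (not in the original file; assumes OnnxConfig can be
# instantiated directly with just a config): the ONNX config exposes the
# expected input axes for the default task.
def _demo_onnx_inputs():
    onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
    assert list(onnx_config.inputs.keys()) == ['input_ids', 'attention_mask']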
| 632 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _a ( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices(self):
lowerCAmelCase : Any = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(lowercase_ , lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , ["""c"""] )
self.assertEqual(lowercase_ , [2] )
# Out indices set to match out features
lowerCAmelCase , lowerCAmelCase : Any = get_aligned_output_features_output_indices(["""a""", """c"""] , lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , ["""a""", """c"""] )
self.assertEqual(lowercase_ , [0, 2] )
# Out features set to match out indices
lowerCAmelCase , lowerCAmelCase : Any = get_aligned_output_features_output_indices(lowercase_ , [0, 2] , lowercase_ )
self.assertEqual(lowercase_ , ["""a""", """c"""] )
self.assertEqual(lowercase_ , [0, 2] )
# Out features selected from negative indices
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(lowercase_ , [-3, -1] , lowercase_ )
self.assertEqual(lowercase_ , ["""a""", """c"""] )
self.assertEqual(lowercase_ , [-3, -1] )
    def test_verify_out_features_out_indices(self):
# Stage names must be set
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , lowercase_ )
# Out features must be a list
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(lowercase_ , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(lowercase_ , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(lowercase_ ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
    def test_backbone_mixin(self):
lowerCAmelCase : Any = BackboneMixin()
lowerCAmelCase : Optional[Any] = ["""a""", """b""", """c"""]
lowerCAmelCase : Optional[int] = ["""a""", """c"""]
lowerCAmelCase : int = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCAmelCase : List[Any] = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCAmelCase : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 693 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from stdin (Windows console or POSIX terminal)."""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
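# Hypothetical interactive loop (requires a TTY, so left commented out): read
# keys until Enter; arrow keys come back as their ARROW_KEY_FLAG-offset codes.
# while True:
#     key = get_character()
#     if key == chr(KEYMAP['newline']):
#         break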
| 693 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs) -> None:
'''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self ) -> int:
        '''simple docstring'''
        _ = self.to_json_kwargs.pop("path_or_buf" , None )
        orient = self.to_json_kwargs.pop("orient" , "records" )
        lines = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
        index = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
        compression = self.to_json_kwargs.pop("compression" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , "wb" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead." )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json(self , args ) -> bytes:
        '''simple docstring'''
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("\n" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write( self , file_obj: BinaryIO , orient , lines , index , **to_json_kwargs , ) -> int:
        '''Writes the pyarrow table as JSON lines to the binary file handle.'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
                    written += file_obj.write(json_str )
        return written
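# Usage sketch (hypothetical path): round-trip a tiny Dataset through JSON lines
# with the reader/writer above. Split handling follows the `datasets` defaults,
# so the reader may hand back a dict of splits rather than a bare Dataset.
def _demo_json_roundtrip() -> None:
    ds = Dataset.from_dict({"a": [1, 2, 3], "b": ["x", "y", "z"]} )
    JsonDatasetWriter(ds , "/tmp/demo.jsonl" , batch_size=2 ).write()
    print(JsonDatasetReader("/tmp/demo.jsonl" ).read() )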
| 666 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions


def is_complete(board: list[list[int]] ) -> bool:
    return not any(elem == 0 for row in board for elem in row )


def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int ) -> list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
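# Quick demo (hypothetical helper; the backtracking search is exponential, so keep
# the board small — 5x5 completes quickly):
def _demo_knight_tour() -> None:
    for row in open_knight_tour(5 ):
        print(row )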
| 666 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list , b: list ) -> list:
    '''Multiplication only for 2x2 matrices.'''
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('''Matrices are not 2x2''' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list , matrix_b: list ) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]


def matrix_subtraction(matrix_a: list , matrix_b: list ) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a: list ) -> tuple[list, list, list, list]:
    '''Split a matrix into its four quadrants.'''
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )


def print_matrix(matrix: list ) -> None:
    print('''\n'''.join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a: list , matrix_b: list ) -> list:
    '''Recursive Strassen multiplication; recursion bottoms out at 2x2 blocks.'''
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a, b, c, d = split_matrix(matrix_a )
    e, f, g, h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(top_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen(matrixa: list , matrixb: list ) -> list:
    '''Pads inputs up to the next power of two, multiplies, then strips the padding.'''
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            f'Matrix A: {matrixa}\n'
            f'Matrix B: {matrixb}'
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona , *dimensionb )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa , new_matrixb )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
_a = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_a = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
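# Sanity-check sketch (the reference triple-loop multiply is an assumption, not
# part of the original module). Note the early return for two square inputs above,
# so compare on a non-square pair like the demo matrices; the reference product is
# computed first because strassen pads its arguments in place.
def _check_strassen(a: list , b: list ) -> None:
    naive = [
        [sum(a[i][k] * b[k][j] for k in range(len(b ) ) ) for j in range(len(b[0] ) )]
        for i in range(len(a ) )
    ]
    assert strassen(a , b ) == naive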
| 29 |
from __future__ import annotations
def simple_interest(principal: float , daily_interest_rate: float , days_between_payments: int ) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: int , ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float , nominal_annual_percentage_rate: float , number_of_years: int , ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
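# Worked example (hypothetical numbers, purely illustrative):
def _demo_interest() -> None:
    # $1,000 at 0.05% per day for 30 days -> $15 of simple interest.
    print(simple_interest(1000 , 0.0005 , 30 ) )
    # $1,000 compounded at 1% per period for 12 periods -> ~$126.83 earned.
    print(round(compound_interest(1000 , 0.01 , 12 ) , 2 ) )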
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_table_transformer': [
        'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TableTransformerConfig',
        'TableTransformerOnnxConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
        'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TableTransformerForObjectDetection',
        'TableTransformerModel',
        'TableTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_snake_case : str = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["""input_values""", """padding_mask"""]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 2_40_00, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )

    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray ):
            raw_audio = np.asarray(raw_audio, dtype=np.float32 )
        elif isinstance(raw_audio, np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
        else:
            padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask" )
        input_values = []
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
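# Usage sketch (assumption: one second of silence stands in for real audio):
def _demo_feature_extractor() -> None:
    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000 )
    audio = np.zeros(24_000, dtype=np.float32 )
    features = extractor(audio, sampling_rate=24_000, return_tensors="np" )
    print(features["input_values"].shape )  # expected: (1, 1, 24000)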
| 693 | 1 |
from __future__ import annotations
def depth_first_search(possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    '''simple docstring'''
    row = len(possible_board )
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )


def n_queens_solution(n: int ) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
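# Quick check sketch (hypothetical helper): collect boards instead of printing.
# The classic counts are 2 solutions for n=4 and 92 for n=8.
def _count_solutions(n: int ) -> int:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    return len(boards )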
| 527 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    '''simple docstring'''

    def _info(self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )

    def _download_and_prepare(self , dl_manager ):
        '''simple docstring'''
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            self.config_name = '''bleurt-base-128'''
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )

    def _compute(self , predictions , references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 527 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_a : List[Any] = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase , ToolTesterMixin):
    def setUp(self ):
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )

    def test_exact_match_arg(self ):
        result = self.tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_arg_remote(self ):
        result = self.remote_tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg(self ):
        result = self.tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def test_exact_match_kwarg_remote(self ):
        result = self.remote_tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 168 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file ):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            tok = f"""[unused{i}]"""
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size(self ):
        return len(self.sp_model ) + self.fairseq_offset

    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize(self , text ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string(self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 168 | 1 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("""from_to""", """from_ to""")
METRIC_CONVERSION = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
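# Worked example: a conversion goes source unit -> cubic meter -> target unit,
# so 2 gallons is 2 * 0.00454 m^3 * 1000 litres/m^3, roughly 9.08 litres.
def _demo_volume() -> None:
    print(volume_conversion(2 , "gallon" , "litre" ) )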
| 721 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model )
        self.position_encoding = nn.Embedding(max_length, d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )

    def forward(self, encoder_input_tokens, encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape )
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
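# Usage sketch (tiny hypothetical config, only to exercise a forward pass; the
# "gated-gelu" projection is an assumption, any value T5Config accepts works):
def _demo_notes_encoder() -> None:
    enc = SpectrogramNotesEncoder(
        max_length=16, vocab_size=32, d_model=8, dropout_rate=0.1,
        num_layers=1, num_heads=2, d_kv=4, d_ff=16, feed_forward_proj="gated-gelu",
    )
    tokens = torch.zeros(1, 16, dtype=torch.long )
    mask = torch.ones(1, 16, dtype=torch.long )
    out, _ = enc(tokens, mask )
    print(out.shape )  # expected: torch.Size([1, 16, 8])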
| 551 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ernie"""] = [
        """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ErnieForCausalLM""",
        """ErnieForMaskedLM""",
        """ErnieForMultipleChoice""",
        """ErnieForNextSentencePrediction""",
        """ErnieForPreTraining""",
        """ErnieForQuestionAnswering""",
        """ErnieForSequenceClassification""",
        """ErnieForTokenClassification""",
        """ErnieModel""",
        """ErniePreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 |
def print_pascal_triangle(num_rows: int ) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=" " )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=" " )
            else:
                print(triangle[row_idx][col_idx] , end="" )
        print()


def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle


def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row


def calculate_current_element(triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    result: list[list[int]] = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}" , setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds" )

    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
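# Cross-check sketch: both generators must produce identical triangles, and the
# fifth row of Pascal's triangle is the binomial coefficients of (a + b)^4.
def _check_pascal() -> None:
    assert generate_pascal_triangle(5 ) == generate_pascal_triangle_optimized(5 )
    assert generate_pascal_triangle(5 )[4] == [1, 4, 6, 4, 1]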
| 385 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self ):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write("".join([x + '''\n''' for x in vocab_tokens] ) )

    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        """simple docstring"""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_token_type_ids(self ):
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('''UNwant\u00E9d,running''' )
            sentence_len = len(inputs['''input_ids'''] ) - 1
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )

            inputs = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 191 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser ):
        """simple docstring"""
        add_new_model_parser = parser.add_parser('''add-new-model''' )
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
        add_new_model_parser.add_argument('''--testing_file''' , type=str , help='''Configuration file on which to run.''' )
        add_new_model_parser.add_argument(
            '''--path''' , type=str , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__(self , testing: bool , testing_file: str , path=None , *args ):
        """simple docstring"""
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self ):
        """simple docstring"""
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.''' )
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"{directory}/configuration.json" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=True )
        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
            pass
        shutil.move(
            f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file." )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
        os.rmdir(directory )
| 191 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self , args=None , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 323 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self , index ):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
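# Usage sketch (hedged: downloads the crime-and-punishment sentencepiece model
# on first run, so it needs network access):
def _demo_reformer_tokenizer() -> None:
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
    ids = tok("Hello world" ).input_ids
    print(tok.convert_ids_to_tokens(ids ) )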
| 130 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ) -> LambdaLR:
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )


def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ) -> LambdaLR:
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":" )
        steps = int(value_str )
        lr_multiple = float(lr_str )
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ) -> LambdaLR:
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ) -> LambdaLR:
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1E-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Optional[int]:
lowerCAmelCase__ : str = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase__ : Dict = lr_init - lr_end
lowerCAmelCase__ : Tuple = num_training_steps - num_warmup_steps
lowerCAmelCase__ : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase__ : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_A = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> List[str]:
lowerCAmelCase__ : Any = SchedulerType(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
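# --- Usage sketch (hedged) --------------------------------------------------
# The factory above mirrors diffusers' scheduler dispatch. Assuming the
# canonical public API (`get_scheduler` from diffusers.optimization), which
# the mangled `lowercase_` bindings here stand in for, a training loop wires
# it up like this:
import torch
from diffusers.optimization import get_scheduler  # assumed canonical import

model = torch.nn.Linear(4, 2)  # toy model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler(
    "cosine",  # any SchedulerType value: linear, cosine, polynomial, ...
    optimizer=optimizer,
    num_warmup_steps=10,
    num_training_steps=100,
)
for _ in range(100):  # one scheduler step per optimizer step
    optimizer.step()
    lr_scheduler.step()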
| 507 |
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> str:
    if isinstance(__UpperCAmelCase , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(__UpperCAmelCase , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
lowerCAmelCase__ : Optional[Any] = False
if num < 0:
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : List[Any] = -num
lowerCAmelCase__ : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__UpperCAmelCase ) for e in binary )
return "0b" + "".join(str(__UpperCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
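# Quick sanity check via an alias for the mangled converter above (assumed
# original name: decimal_to_binary); output mirrors Python's built-in bin().
decimal_to_binary = lowercase_
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(40) == "0b101000"
assert decimal_to_binary(-40) == "-0b101000"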
| 507 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
UpperCAmelCase_ = {"mobilebert-uncased": 5_12}
UpperCAmelCase_ = {}
class __lowercase ( __magic_name__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = MobileBertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> str:
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
__a = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
__a = do_lower_case
__a = strip_accents
__a = tokenize_chinese_chars
__a = normalizer_class(**UpperCamelCase )
__a = do_lower_case
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None ) -> Tuple:
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
__a = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
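# Usage sketch (hedged): assumes the canonical transformers class name,
# for which the mangled `__lowercase` binding above stands in.
from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
enc = tokenizer("hello world")
print(enc["input_ids"])  # [CLS] ... [SEP] wordpiece ids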
| 539 |
'''simple docstring'''
class __lowercase : # Public class to implement a graph
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None:
__a = row
__a = col
__a = graph
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None:
# Checking all 8 elements surrounding nth element
__a = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__a = [-1, 0, 1, -1, 1, -1, 0, 1]
__a = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase )
def UpperCamelCase__ ( self ) -> int: # And finally, count all islands.
__a = [[False for j in range(self.COL )] for i in range(self.ROW )]
__a = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += 1
return count
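# Usage sketch (hedged): the class above is the classic 8-connectivity
# "count islands" DFS. The mangling collapsed all three method names into
# `UpperCamelCase__`, so this sketch assumes the original interface
# (Graph(rows, cols, grid) plus is_safe / diffs / count_islands):
#
#     grid = [
#         [1, 1, 0, 0, 0],
#         [0, 1, 0, 0, 1],
#         [1, 0, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#     ]
#     g = Graph(len(grid), len(grid[0]), grid)
#     print(g.count_islands())  # 5 islands under 8-connectivity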
| 539 | 1 |
def _a ( __UpperCamelCase ):
return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] )
def _a ( __UpperCamelCase ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(__UpperCamelCase ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__UpperCamelCase ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(__UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
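# Round-trip sketch (hedged): both helpers above were mangled to the same
# name `_a` (the second def shadows the first), so the assumed original
# names are used here:
#
#     base16_encode(b"Hello World!")             # -> "48656C6C6F20576F726C6421"
#     base16_decode("48656C6C6F20576F726C6421")  # -> b"Hello World!"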
| 478 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__ ( lowerCAmelCase_ , unittest.TestCase ):
lowerCamelCase__: int = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCAmelCase( self : int , lowerCamelCase_ : Union[str, Any]=0 ):
a_ : Any = np.random.RandomState(lowerCamelCase_ )
a_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase( self : List[Any] ):
a_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : Optional[Any] = pipe(**lowerCamelCase_ ).images
a_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : str = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : List[str] ):
a_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
a_ : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Dict = self.get_dummy_inputs()
a_ : Optional[int] = pipe(**lowerCamelCase_ ).images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : Tuple = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : List[Any] ):
a_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
a_ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Dict = self.get_dummy_inputs()
a_ : Union[str, Any] = pipe(**lowerCamelCase_ ).images
a_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : Any ):
a_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
a_ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Optional[Any] = self.get_dummy_inputs()
a_ : Optional[Any] = pipe(**lowerCamelCase_ ).images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : List[str] ):
a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
a_ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Dict = self.get_dummy_inputs()
a_ : Union[str, Any] = pipe(**lowerCamelCase_ ).images
a_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : Tuple = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : Optional[Any] ):
a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
a_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Tuple = self.get_dummy_inputs()
a_ : Optional[int] = pipe(**lowerCamelCase_ ).images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
a_ : List[Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase( self : Optional[Any] ):
a_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : List[str] = self.get_dummy_inputs()
a_ : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
a_ : Optional[int] = pipe(**lowerCamelCase_ )
a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]
a_ : Tuple = self.get_dummy_inputs()
a_ : List[Any] = 3 * [inputs.pop("""prompt""" )]
a_ : List[Any] = pipe.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""np""" , )
a_ : str = text_inputs["""input_ids"""]
a_ : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
a_ : List[Any] = prompt_embeds
# forward
a_ : Tuple = pipe(**lowerCamelCase_ )
a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase( self : List[str] ):
a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : int = self.get_dummy_inputs()
a_ : str = 3 * ["""this is a negative prompt"""]
a_ : int = negative_prompt
a_ : Dict = 3 * [inputs["""prompt"""]]
# forward
a_ : Tuple = pipe(**lowerCamelCase_ )
a_ : Union[str, Any] = output.images[0, -3:, -3:, -1]
a_ : Tuple = self.get_dummy_inputs()
a_ : Optional[int] = 3 * [inputs.pop("""prompt""" )]
a_ : str = []
for p in [prompt, negative_prompt]:
a_ : Optional[int] = pipe.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""np""" , )
a_ : int = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
        a_ , a_ = embeds
# forward
a_ : Optional[Any] = pipe(**lowerCamelCase_ )
a_ : int = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class a__ ( unittest.TestCase ):
@property
def UpperCAmelCase( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase( self : List[Any] ):
a_ : List[str] = ort.SessionOptions()
a_ : List[str] = False
return options
def UpperCAmelCase( self : Optional[Any] ):
# using the PNDM scheduler by default
a_ : int = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Tuple = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
a_ : int = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="""np""" )
a_ : Optional[int] = output.images
a_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Optional[int] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase( self : str ):
a_ : str = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
a_ : Any = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Union[str, Any] = """open neural network exchange"""
a_ : str = np.random.RandomState(0 )
a_ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCamelCase_ , output_type="""np""" )
a_ : str = output.images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : List[str] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase( self : Optional[int] ):
a_ : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
a_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : Optional[int] = """open neural network exchange"""
a_ : Tuple = np.random.RandomState(0 )
a_ : Tuple = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCamelCase_ , output_type="""np""" )
a_ : Tuple = output.images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase( self : Optional[Any] ):
a_ : List[str] = 0
def test_callback_fn(lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : np.ndarray ) -> None:
a_ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
a_ : Union[str, Any] = latents[0, -3:, -3:, -1]
a_ : Optional[Any] = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
a_ : List[str] = latents[0, -3:, -3:, -1]
a_ : int = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
a_ : Optional[int] = False
a_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
a_ : List[str] = """Andromeda galaxy in a bottle"""
a_ : Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase( self : Dict ):
a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
assert pipe.safety_checker is None
a_ : Union[str, Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
a_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a_ : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 478 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( input_a : int , input_b : int ) -> int:
    '''simple docstring'''
    return int(input_a == input_b == 0 )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 38 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Dict = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'visual_bert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=512 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
a :Any = vocab_size
a :str = max_position_embeddings
a :str = hidden_size
a :List[Any] = visual_embedding_dim
a :str = num_hidden_layers
a :Optional[int] = num_attention_heads
a :int = intermediate_size
a :int = hidden_act
a :Union[str, Any] = hidden_dropout_prob
a :List[Any] = attention_probs_dropout_prob
a :int = initializer_range
a :List[Any] = type_vocab_size
a :str = layer_norm_eps
a :Optional[int] = bypass_transformer
a :str = special_visual_initialize
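# Usage sketch (hedged): assumes the canonical transformers names for the
# mangled `_snake_case` class above.
from transformers import VisualBertConfig, VisualBertModel

config = VisualBertConfig(visual_embedding_dim=512)
model = VisualBertModel(config)  # randomly initialised weights
print(config.hidden_size, config.visual_embedding_dim)  # 768 512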
| 445 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __lowercase ( datasets.BuilderConfig ):
lowercase = None
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, )-> Dict:
"""simple docstring"""
import pyspark
def generate_fn():
lowercase = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
lowercase = df_with_partition_id.select('''*''' ).where(f'part_id = {partition_id}' ).drop('''part_id''' )
lowercase = partition_df.collect()
lowercase = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class __lowercase ( _BaseExamplesIterable ):
def __init__( self : Union[str, Any] , __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : List[str]=None , ) -> Optional[int]:
'''simple docstring'''
lowercase = df
lowercase = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Any ) -> Optional[int]:
'''simple docstring'''
yield from self.generate_examples_fn()
def __a ( self : Dict , __lowerCamelCase : np.random.Generator ) -> "SparkExamplesIterable":
'''simple docstring'''
lowercase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
def __a ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> "SparkExamplesIterable":
'''simple docstring'''
lowercase = self.split_shard_indices_by_worker(__lowerCamelCase , __lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
@property
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return len(self.partition_order )
class __lowercase ( datasets.DatasetBuilder ):
lowercase = SparkConfig
def __init__( self : Any , __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : str = None , __lowerCamelCase : str = None , **__lowerCamelCase : List[Any] , ) -> Optional[int]:
'''simple docstring'''
import pyspark
lowercase = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase = df
lowercase = working_dir
super().__init__(
cache_dir=__lowerCamelCase , config_name=str(self.df.semanticHash() ) , **__lowerCamelCase , )
def __a ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
def create_cache_and_write_probe(__lowerCamelCase : Dict ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCamelCase )
lowercase = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCamelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def __a ( self : str ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __a ( self : List[str] , __lowerCamelCase : datasets.download.download_manager.DownloadManager ) -> List[Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __a ( self : Optional[int] , __lowerCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(__lowerCamelCase : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
lowercase = self.df.count()
lowercase = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase = (
self.df.limit(__lowerCamelCase )
.repartition(1 )
.mapInArrow(__lowerCamelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase = min(__lowerCamelCase , int(approx_total_size / max_shard_size ) )
lowercase = self.df.repartition(__lowerCamelCase )
def __a ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
lowercase = ParquetWriter if file_format == '''parquet''' else ArrowWriter
lowercase = os.path.join(self._working_dir , os.path.basename(__lowerCamelCase ) ) if self._working_dir else fpath
lowercase = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase = self.config.features
lowercase = self._writer_batch_size
lowercase = self._fs.storage_options
def write_arrow(__lowerCamelCase : Dict ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase = pyspark.TaskContext().taskAttemptId()
lowercase = next(__lowerCamelCase , __lowerCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
lowercase = 0
lowercase = writer_class(
features=__lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
lowercase = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase ,lowercase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
lowercase = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
lowercase = pa.Table.from_batches([batch] )
writer.write_table(__lowerCamelCase )
if writer._num_bytes > 0:
lowercase ,lowercase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCamelCase ) ):
lowercase = os.path.join(os.path.dirname(__lowerCamelCase ) , os.path.basename(__lowerCamelCase ) )
shutil.move(__lowerCamelCase , __lowerCamelCase )
lowercase = (
self.df.mapInArrow(__lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __a ( self : Optional[Any] , __lowerCamelCase : "datasets.SplitGenerator" , __lowerCamelCase : str = "arrow" , __lowerCamelCase : Optional[Union[str, int]] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : Optional[int] , ) -> Tuple:
'''simple docstring'''
self._validate_cache_dir()
lowercase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCamelCase )
lowercase = not is_remote_filesystem(self._fs )
lowercase = os.path.join if is_local else posixpath.join
lowercase = '''-TTTTT-SSSSS-of-NNNNN'''
lowercase = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
lowercase = path_join(self._output_dir , __lowerCamelCase )
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = []
lowercase = []
for task_id, content in self._prepare_split_single(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
            lowercase ,lowercase ,lowercase ,lowercase = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCamelCase )
lowercase = total_num_examples
lowercase = total_num_bytes
# should rename everything at the end
logger.debug(f'Renaming {total_shards} shards.' )
if total_shards > 1:
lowercase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ):
rename(
__lowerCamelCase , fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , fpath.replace('''TTTTT-SSSSS''' , f'{global_shard_id:05d}' ).replace('''NNNNN''' , f'{total_shards:05d}' ) , )
lowercase = []
lowercase = 0
for i in range(len(__lowerCamelCase ) ):
lowercase ,lowercase = task_id_and_num_shards[i]
for shard_id in range(__lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCamelCase , len(__lowerCamelCase ) ).map(lambda __lowerCamelCase : _rename_shard(*__lowerCamelCase ) ).collect()
else:
# don't use any pattern
lowercase = 0
lowercase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'{shard_id:05d}' ).replace('''TTTTT''' , f'{task_id:05d}' ) , fpath.replace(__lowerCamelCase , '''''' ) , )
def __a ( self : Union[str, Any] , __lowerCamelCase : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
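# Usage sketch (hedged): the builder above backs datasets' public
# `Dataset.from_spark` entry point; a minimal round trip looks like this.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
ds = Dataset.from_spark(df)  # materialises the DataFrame into Arrow shards
print(ds[0])  # {'text': 'a', 'label': 0}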
| 479 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __lowercase :
# setable values
lowercase = None
lowercase = None
lowercase = None # sigma(t_i)
@classmethod
def __a ( cls : List[str] ) -> Dict:
'''simple docstring'''
return cls()
@dataclass
class __lowercase ( _A ):
lowercase = 42
lowercase = 42
lowercase = 42
class __lowercase ( _A , _A ):
@property
def __a ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return True
@register_to_config
def __init__( self : int , __lowerCamelCase : float = 0.02 , __lowerCamelCase : float = 1_00 , __lowerCamelCase : float = 1.007 , __lowerCamelCase : float = 80 , __lowerCamelCase : float = 0.05 , __lowerCamelCase : float = 50 , ) -> List[Any]:
'''simple docstring'''
pass
def __a ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def __a ( self : str , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : int , __lowerCamelCase : Tuple = () ) -> KarrasVeSchedulerState:
'''simple docstring'''
lowercase = jnp.arange(0 , __lowerCamelCase )[::-1].copy()
lowercase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__lowerCamelCase , schedule=jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , timesteps=__lowerCamelCase , )
def __a ( self : Optional[Any] , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
lowercase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowercase = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase = random.split(__lowerCamelCase , num=1 )
lowercase = self.config.s_noise * random.normal(key=__lowerCamelCase , shape=sample.shape )
lowercase = sigma + gamma * sigma
lowercase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __a ( self : str , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
lowercase = sample_hat + sigma_hat * model_output
lowercase = (sample_hat - pred_original_sample) / sigma_hat
lowercase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , state=__lowerCamelCase )
def __a ( self : str , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
lowercase = sample_prev + sigma_prev * model_output
lowercase = (sample_prev - pred_original_sample) / sigma_prev
lowercase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , state=__lowerCamelCase )
def __a ( self : int , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
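# Usage sketch (hedged): assumes the canonical diffusers class name
# `FlaxKarrasVeScheduler` for the mangled `__lowercase` binding above, and
# its functional state-passing pattern (method names were collapsed to `__a`):
#
#     import jax
#     import jax.numpy as jnp
#
#     scheduler = FlaxKarrasVeScheduler()
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     sample = jnp.zeros((1, 3, 32, 32))
#     key = jax.random.PRNGKey(0)
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(
#         state, sample, state.schedule[0], key
#     )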
| 479 | 1 |
def __lowerCAmelCase ( _A ,_A ,_A ,_A ,_A ,_A ):
"""simple docstring"""
if index == r:
for j in range(_A ):
print(data[j] ,end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_lowercase = arr[i]
combination_util(_A ,_A ,_A ,index + 1 ,_A ,i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_A ,_A ,_A ,_A ,_A ,i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( _A ,_A ,_A ):
"""simple docstring"""
_lowercase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_A ,_A ,_A ,0 ,_A ,0 )
if __name__ == "__main__":
# Driver code to check the function above
A_: str = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 398 |
def __lowerCAmelCase ( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        _lowercase = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(_lowercase )
if number < 0:
return False
_lowercase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
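# The check above is the classic automorphic-number test: n is automorphic
# when n * n ends in the digits of n. Quick sanity check via an alias for
# the mangled name (assumed original name: is_automorphic_number):
is_automorphic_number = __lowerCAmelCase
assert is_automorphic_number(5)       # 5 * 5 = 25 ends in 5
assert is_automorphic_number(76)      # 76 * 76 = 5776 ends in 76
assert not is_automorphic_number(7)   # 7 * 7 = 49 does not end in 7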
| 398 | 1 |
import math
import random
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : float , UpperCamelCase : bool = False ) -> float:
"""simple docstring"""
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_A = 0.02
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int , UpperCamelCase : int ) -> float:
"""simple docstring"""
a_ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(UpperCamelCase ):
# Forward propagation
a_ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
a_ = (expected / 100) - layer_a
# Error delta
        a_ = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = int(input('Expected value: '))
_A = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
| 705 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
a_ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
a_ = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
a_ = {}
a_ = os.path.join(UpperCamelCase , """all_results.json""" )
if os.path.exists(UpperCamelCase ):
with open(UpperCamelCase , """r""" ) as f:
a_ = json.load(UpperCamelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def __SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
a_ = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
@classmethod
def __magic_name__ ( cls ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
a_ = tempfile.mkdtemp()
a_ = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
a_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __magic_name__ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a_ = 7 if get_gpu_count() > 1 else 2
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def __magic_name__ ( self ):
a_ = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __magic_name__ ( self ):
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
a_ = get_results(_SCREAMING_SNAKE_CASE )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 403 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : List[Any] = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "open-llama"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict=10_0000 , SCREAMING_SNAKE_CASE_ : int=4096 , SCREAMING_SNAKE_CASE_ : str=1_1008 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Dict=32 , SCREAMING_SNAKE_CASE_ : Optional[int]="silu" , SCREAMING_SNAKE_CASE_ : Dict=2048 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE_ : int=1e-6 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Any , ) -> Optional[Any]:
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = intermediate_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = rms_norm_eps
__snake_case = use_cache
        __snake_case = kwargs.pop(
            'use_memorry_efficient_attention' , False )
__snake_case = hidden_dropout_prob
__snake_case = attention_dropout_prob
__snake_case = use_stable_embedding
__snake_case = shared_input_output_embedding
__snake_case = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def a ( self : List[Any] ) -> List[Any]:
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}' )
        __snake_case = self.rope_scaling.get('type' , None )
        __snake_case = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
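# Usage sketch (hedged): assumes the canonical transformers name for the
# mangled `_lowercase` class above.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(
    hidden_size=512,
    num_hidden_layers=4,
    num_attention_heads=8,
    rope_scaling={"type": "linear", "factor": 2.0},  # validated above
)
print(config.rope_scaling)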
| 56 |
from __future__ import annotations
def lowerCAmelCase_ ( __UpperCAmelCase: list[int | str] ) -> None:
create_state_space_tree(__UpperCAmelCase , [] , 0 , [0 for i in range(len(__UpperCAmelCase ) )] )
def lowerCAmelCase_ ( __UpperCAmelCase: list[int | str] , __UpperCAmelCase: list[int | str] , __UpperCAmelCase: int , __UpperCAmelCase: list[int] , ) -> None:
if index == len(__UpperCAmelCase ):
print(__UpperCAmelCase )
return
for i in range(len(__UpperCAmelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ : List[Any] = True
create_state_space_tree(__UpperCAmelCase , __UpperCAmelCase , index + 1 , __UpperCAmelCase )
current_sequence.pop()
UpperCamelCase__ : Union[str, Any] = False
UpperCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 253 | 0 |
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
lowerCamelCase : dict[str, TrieNode] = {} # Mapping from char to TrieNode
lowerCamelCase : List[Any] = False
def _snake_case ( self , __A ):
"""simple docstring"""
        for word in __A:
            self.insert(word )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[Any] = self
for char in word:
if char not in curr.nodes:
lowerCamelCase : List[Any] = TrieNode()
lowerCamelCase : int = curr.nodes[char]
lowerCamelCase : Union[str, Any] = True
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase : Dict = curr.nodes[char]
return curr.is_leaf
def _snake_case ( self , __A ):
"""simple docstring"""
def _delete(__A , __A , __A ) -> bool:
if index == len(__A ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase : Optional[Any] = False
return len(curr.nodes ) == 0
lowerCamelCase : str = word[index]
lowerCamelCase : str = curr.nodes.get(__A )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
lowerCamelCase : int = _delete(__A , __A , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __A , 0 )
def print_words(node: TrieNode, word: str) -> None:
    """Prints every word stored below `node`, prefixed with `word`."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    assert test_trie()
def main() -> None:
    """Exercises the trie, checking that all words are inserted, found and deleted correctly."""
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
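# Editorial note: insert/find/delete each run in O(len(word)) time — every loop or
# recursion step above consumes exactly one character — so the cost of a lookup is
# independent of how many other words are stored in the trie.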
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
'''simple docstring'''
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs, ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
        self.post_init()
    def post_init(self):
        r"""Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0" ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        r"""Returns `True` if the model is quantizable, `False` otherwise."""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        r"""Returns the quantization method used for the model."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
@classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Instantiates a config from a Python dictionary of parameters."""
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output
    def __repr__(self):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
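# Illustrative usage (editorial addition; assumes bitsandbytes>=0.39.0 is installed):
#   config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   config.quantization_method()  # -> "nf4"
#   config.to_diff_dict()         # only the fields that differ from the defaults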
| 231 | 0 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # Grab the image file name and strip the trailing "_<index>.jpg" to recover its label
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
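# e.g. (illustrative): extract_label("images/great_pyrenees_123.jpg") -> "great_pyrenees"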
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
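# (Editorial note) Checkpoint folders are saved as `epoch_{i}` or `step_{i}`, and the
# resume logic above parses those names back into `starting_epoch` / `resume_step`.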
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 69 |
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''', id='''sequence''' ), id='''X''' ),
                } ), )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''')
        if len(reference_distribution.shape) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''')
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 183 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Loads an ONNX Inference session with a given ExecutionProvider; defaults to CPU."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
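# Illustrative usage (editorial addition; assumes an exported ONNX model on disk):
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))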
| 706 |
"""simple docstring"""
import numpy as np
class Cell:
    """A cell in the world grid, carrying its position, its parent and the A* scores g, h and f."""
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neigbours(self, cell):
        """Return the valid neighbours of `cell` — all 8 surrounding positions inside the world."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from `start` to `goal`, returning the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
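# Editorial note: the heuristic above is the *squared* Euclidean distance — cheap to compute,
# but not admissible in the strict A* sense, so the returned path is not guaranteed shortest.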
if __name__ == "__main__":
__snake_case = Gridworld()
# Start position and goal
__snake_case = Cell()
__snake_case = (0, 0)
__snake_case = Cell()
__snake_case = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
__snake_case = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__snake_case = 1
print(world.w)
| 117 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''[PAD]''')
        self.assertEqual(vocab_keys[1], '''[CLS]''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 1_012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def test_attention_outputs(self):
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def prepare_img():
    """Loads the standard COCO test image used across vision tests."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''') if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 14 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = """unispeech"""
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
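    # Illustrative (editorial addition): with the default conv_stride (5, 2, 2, 2, 2, 2, 2)
    # the property above returns 5 * 2**6 = 320, i.e. one logit frame per 320 input
    # samples — about 20 ms of audio at a 16 kHz sampling rate.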
| 713 |
import math
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
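# Sanity check (illustrative): for n = 10, (1 + ... + 10)**2 - (1**2 + ... + 10**2) = 3025 - 385 = 2640.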
if __name__ == "__main__":
print(F'''{solution() = }''')
| 181 | 0 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the max number of shards, checking that every data source list has the same length."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Spread `num_shards` shards as evenly as possible over at most `max_num_jobs` jobs."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the lists in `gen_kwargs` into one dict of kwargs per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge the kwargs of several jobs back into a single gen_kwargs dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the data source lists in `gen_kwargs`, keeping lists of the same length in sync."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 86 |
'''simple docstring'''
from __future__ import annotations
END = '''#'''
class Trie:
    def __init__(self):
        self._trie: dict = {}
    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)
    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    """Returns every stored word that completes the given prefix."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
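# Expected output (illustrative): autocomplete_using_trie("de") returns every stored word
# completing the prefix "de", e.g. ('depart ', 'detergent ', 'deer ', 'deal ') — each
# completion ends with the space appended at the END marker.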
| 138 | 0 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    """Returns the longest common substring of text1 and text2, found via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
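# e.g. (illustrative): longest_common_substring("TheAlgorithms", "Algorithms") == "Algorithms"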
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    """good first issue""",
    """feature request""",
    """wip""",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
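            # (editorial note) i.e. close only when the most recent comment is the bot's own
            # stale warning and a further week has passed with no activity.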
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 349 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data; property names mirror the corresponding model input names."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
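# Illustrative (editorial addition): one HANS row becomes e.g.
#   InputExample(guid="train-1", text_a="The doctor saw the lawyer.",
#                text_b="The lawyer was seen.", label="entailment", pairID="1")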
if is_torch_available():
import torch
from torch.utils.data import Dataset
class a ( a__ ):
snake_case__ = 42
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case=False , _snake_case = False , ):
"""simple docstring"""
lowerCAmelCase = hans_processors[task]()
lowerCAmelCase = os.path.join(
_snake_case , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_snake_case ) , _snake_case , ) , )
lowerCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1]
lowerCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
lowerCAmelCase = torch.load(_snake_case )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
lowerCAmelCase = (
processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
)
logger.info('Training examples: %s' , len(_snake_case ) )
lowerCAmelCase = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
logger.info('Saving features into cached file %s' , _snake_case )
torch.save(self.features , _snake_case )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _snake_case ):
"""simple docstring"""
return self.features[i]
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10_000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: the integer dtypes below were garbled in the source ("tf.intaa");
            # tf.int32 for the inputs and tf.int64 for the label is a reconstruction.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex') else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(F'guid: {example}')
        logger.info(F'features: {features[i]}')

    return features
hans_tasks_num_labels = {
    'hans': 3,
}

hans_processors = {
    'hans': HansProcessor,
}
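# --- illustrative usage (added sketch, not part of the original module) ---
# Assumes PyTorch is installed and the HANS data files ("heuristics_train_set.txt",
# "heuristics_evaluation_set.txt") already exist under `data_dir`; the tokenizer
# checkpoint and path below are placeholders.
def _hans_dataset_example(data_dir: str = "./hans_data"):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(data_dir, tokenizer, task="hans", max_seq_length=128, evaluate=True)
    print(len(dataset), dataset.get_labels())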
| 4 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249 |
from manim import *
class CheckpointToDiskScene(Scene):
    # NOTE: the identifier names and the layout constants (UP/RIGHT/DOWN, YELLOW/BLUE)
    # below are a best-effort reconstruction from how each object is used later in
    # this scene; the source had replaced them with unrecoverable placeholders.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('''CPU''', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('''GPU''', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('''Model''', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []  # reconstructed names; only model_cpu_arr is filled below

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text('''Loaded Checkpoint''', font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text('''Disk''', font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))
        step_2 = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=24)
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(key, key_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
| 249 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list, restricted to edge weights of 0 or 1 (for 0-1 BFS)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # A 0-weight edge keeps the distance, so its endpoint is explored
                # first (front of the deque); a 1-weight edge goes to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')

        return distances[finish_vertex]
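# --- illustrative usage (added sketch, not part of the original module) ---
def _zero_one_bfs_demo() -> None:
    # 4-vertex graph; AdjacencyList only accepts 0/1 edge weights.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 2, 0)
    # Two candidate paths to vertex 2: 0->1->2 (cost 1) and 0->3->2 (cost 1).
    print(g.get_shortest_path(0, 2))  # prints 1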
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 34 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE with the modified Euler (Heun) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: plain Euler step; corrector: average the two slopes.
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
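# --- illustrative usage (added sketch, not part of the original module) ---
def _euler_modified_demo() -> None:
    # dy/dx = y with y(0) = 1 on [0, 1]; the last entry approximates e ~ 2.718.
    f = lambda x, y: y
    ys = euler_modified(f, 1.0, 0.0, 0.1, 1.0)
    print(ys[-1])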
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 707 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
    from .pipelines import MidiProcessor
| 535 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
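# --- illustrative usage (added sketch, not part of the original module) ---
# The data path and tokenizer checkpoint are placeholders; the GLUE .tsv files
# must already exist under `data_dir` for this to run.
def _glue_dataset_example(data_dir: str = "./glue_data/MRPC"):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = GlueDataTrainingArguments(task_name="mrpc", data_dir=data_dir, max_seq_length=128)
    dataset = GlueDataset(args, tokenizer, mode=Split.dev)
    print(len(dataset), dataset.get_labels())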
| 62 |
"""Project Euler problem 234: sum the semidivisible numbers up to a limit."""
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n, using an odd-only sieve."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    print(solution())
| 494 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """tqdm wrapper that, by default, only displays a bar on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except the local main process; the
        # source had this comparison inverted ("== 0" would silence rank 0).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
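# --- illustrative usage (added sketch, not part of the original module) ---
def _tqdm_demo() -> None:
    # Only the local main process renders the bar; other ranks run silently.
    for _ in tqdm(True, range(10), desc="steps"):
        pass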
| 712 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Legacy filesystem view of a Hugging Face Hub dataset repository."""

    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}')
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
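# --- illustrative usage (added sketch, not part of the original module) ---
# A DatasetInfo normally comes from HfApi.dataset_info(); the repo id below
# is just a well-known public dataset used as a placeholder.
def _hf_filesystem_example():
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=repo_info)
    print(fs.ls(""))  # list the files at the repository root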
| 503 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
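# --- illustrative usage (added sketch, not part of the original module) ---
def _swin2sr_config_example() -> None:
    # Default configuration with a single override: a 4x upscaling variant.
    config = Swin2SRConfig(upscale=4)
    print(config.model_type, config.num_layers, config.upscale)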
| 143 |
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
A_ = frozenset(["image"])
A_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image"])
A_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image", "mask_image"])
A_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["example_image", "image", "mask_image"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
| 143 | 1 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 63 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        # NOTE: the boolean tokenizer kwargs below were garbled in the source and
        # are reconstructed to the values used by the upstream Bark processor test.
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 521 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 521 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens (None if there are too few)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-identifier characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str ):
'''simple docstring'''
_UpperCAmelCase : Dict =get_tokens(__lowerCamelCase )
_UpperCAmelCase : str =get_tokens(__lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        # compare this element against every extreme kept so far
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        # for-else: no existing extreme was similar enough, so this element becomes one
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(f"Original dataset size: {len(dataset )}" )
    print(f"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(f"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(f"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(f"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
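# Usage sketch: deduplicating a toy dataset with the helpers above. Note that real
# inputs need at least MIN_NUM_TOKENS tokens per file to be indexed at all.
# from datasets import Dataset
# ds = Dataset.from_dict({
#     "content": ["def f(x):\n    return x + 1", "def f(y):\n    return y + 1"],
#     "repo_name": ["repo_a", "repo_b"],
#     "path": ["a.py", "b.py"],
# })
# ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)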
| 331 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
    '''simple docstring'''
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc['local']] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
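# Example of what clean_doc_toc does to a hypothetical section:
# [{"local": "overview", "title": "Overview"}, {"local": "ddpm", "title": "DDPM"},
#  {"local": "ddim", "title": "DDIM"}, {"local": "ddim", "title": "DDIM"}]
# becomes (duplicates merged, entries sorted by title, "overview" kept first):
# [{"local": "overview", "title": "Overview"}, {"local": "ddim", "title": "DDIM"},
#  {"local": "ddpm", "title": "DDPM"}]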
| 331 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def convert_state_dict( state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('emb.' ):
            name = name.replace('emb.' , 'embeddings.' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0' ):
            name = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , name )
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k' ):
            name = name.replace('.time_mix_k' , '.time_mix_key' )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v' ):
            name = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r' ):
            name = name.replace('.time_mix_r' , '.time_mix_receptance' )
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
UpperCamelCase : Tuple = 50277
UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
UpperCamelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
UpperCamelCase : Tuple = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
UpperCamelCase : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
UpperCamelCase : Union[str, Any] = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
UpperCamelCase : Optional[int] = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
UpperCamelCase : Optional[int] = hf_hub_download(snake_case__ , snake_case__ )
UpperCamelCase : Optional[Any] = torch.load(snake_case__ , map_location='cpu' )
UpperCamelCase : List[str] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
UpperCamelCase , UpperCamelCase : Tuple = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
UpperCamelCase : List[Any] = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
UpperCamelCase : str = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '\n'
f.write(snake_case__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you have still converted the model.' )
UpperCamelCase : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
UpperCamelCase : int = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='2GB' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
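# Quick demo of the key-renaming scheme implemented by convert_state_dict above:
# convert_state_dict({"emb.weight": 0, "blocks.0.ln0.weight": 1, "blocks.3.att.time_mix_k": 2})
# -> {"rwkv.embeddings.weight": 0, "rwkv.blocks.0.pre_ln.weight": 1,
#     "rwkv.blocks.3.attention.time_mix_key": 2}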
| 40 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ) -> Tuple:
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
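# Worked example with the default scale_factor=8: for a 768x768 request,
# 768 // 8**2 == 12 with no remainder, so the function returns (12 * 8, 12 * 8) == (96, 96),
# i.e. the latent resolution corresponding to a 768x768 image.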
def prepare_image( pil_image , w=512 , h=512 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class KandinskyV22Img2ImgPipeline( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
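    # Worked example: with num_inference_steps=100 and strength=0.2, init_timestep is
    # min(int(100 * 0.2), 100) = 20 and t_start = 100 - 20 = 80, so only the last 20
    # denoising steps run on the noised version of the input image.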
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
    def enable_model_cpu_offload( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , strength: float = 0.3 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=image )
| 46 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__A =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
    '''simple docstring'''
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        # No specific FOR_XXX available yet
    def __call__( self , audio: Union[np.ndarray, bytes, str] , **kwargs ):
        '''simple docstring'''
        return super().__call__(audio , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        '''simple docstring'''
        if isinstance(audio , str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , '''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
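# Usage sketch (the checkpoint name is an assumption; any CLAP-style zero-shot audio
# model should work):
# from transformers import pipeline
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["dog barking", "car horn", "rain"])
# -> [{"score": 0.97, "label": "dog barking"}, ...]   (illustrative numbers)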
| 710 |
def solution( numerator : int = 1 , digit : int = 10_00 ) -> int:
    '''
    Finds the value d < `digit` for which 1/d has the longest recurring
    decimal cycle (Project Euler problem 26).
    '''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
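# Worked example: solution(1, 10) == 7, since among 1/1 .. 1/10 the expansion of
# 1/7 = 0.(142857) has the longest recurring cycle (length 6). Over the full
# Project Euler 26 range, solution(1, 1000) returns 983.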
| 241 | 0 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
# check whether the graph has an euler cycle, an euler path, or neither
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
    main()
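# check_circuit_or_path returns (1, _) when no vertex has odd degree (Euler cycle),
# (2, odd_node) when exactly two do (Euler path), and (3, _) otherwise. For g1 above,
# vertices 1 and 5 have odd degree, so the traversal starts at 5 and prints the Euler
# path [5, 4, 1, 2, 3, 1].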
| 468 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split( identifier ):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , identifier )
    return [m.group(0 ) for m in matches]
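# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]: the regex splits
# before each lower->UPPER boundary and before an UPPER followed by UPPER+lower,
# which keeps leading acronyms such as "TF" intact.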
def get_frameworks_table():
lowerCAmelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase_ = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCAmelCase_ = collections.defaultdict(__a )
lowerCAmelCase_ = collections.defaultdict(__a )
lowerCAmelCase_ = collections.defaultdict(__a )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__a ):
lowerCAmelCase_ = None
if _re_tf_models.match(__a ) is not None:
lowerCAmelCase_ = tf_models
lowerCAmelCase_ = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
lowerCAmelCase_ = flax_models
lowerCAmelCase_ = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
lowerCAmelCase_ = pt_models
lowerCAmelCase_ = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCAmelCase_ = True
break
# Try again after removing the last word in the name
lowerCAmelCase_ = "".join(camel_case_split(__a )[:-1] )
lowerCAmelCase_ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCAmelCase_ = list(__a )
all_models.sort()
lowerCAmelCase_ = {"model_type": all_models}
lowerCAmelCase_ = [pt_models[t] for t in all_models]
lowerCAmelCase_ = [tf_models[t] for t in all_models]
lowerCAmelCase_ = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
lowerCAmelCase_ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCAmelCase_ = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCAmelCase_ = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCAmelCase_ = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCAmelCase_ = "AutoTokenizer"
lowerCAmelCase_ = [processors[t] for t in all_models]
return pd.DataFrame(__a )
def update_pipeline_and_auto_class_table( table ):
lowerCAmelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCAmelCase_ = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
lowerCAmelCase_ = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(__a , __a , __a ):
# The type of pipeline may not exist in this framework
if not hasattr(__a , __a ):
continue
# First extract all model_names
lowerCAmelCase_ = []
for name in getattr(__a , __a ).values():
if isinstance(__a , __a ):
model_names.append(__a )
else:
model_names.extend(list(__a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata( token , commit_sha ):
lowerCAmelCase_ = get_frameworks_table()
lowerCAmelCase_ = Dataset.from_pandas(__a )
lowerCAmelCase_ = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=__a )
lowerCAmelCase_ = Dataset.from_json(__a )
lowerCAmelCase_ = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(__a ) )
}
lowerCAmelCase_ = update_pipeline_and_auto_class_table(__a )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCAmelCase_ = sorted(table.keys() )
lowerCAmelCase_ = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
lowerCAmelCase_ = Dataset.from_pandas(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__a , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(__a , "pipeline_tags.json" ) )
if commit_sha is not None:
lowerCAmelCase_ = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
lowerCAmelCase_ = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=__a , repo_type="dataset" , token=__a , commit_message=__a , )
def check_pipeline_tags():
lowerCAmelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCAmelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCAmelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCAmelCase_ = pipeline_tasks[key]["pt"]
if isinstance(__a , (list, tuple) ):
lowerCAmelCase_ = model[0]
lowerCAmelCase_ = model.__name__
if model not in in_table.values():
missing.append(__a )
if len(__a ) > 0:
lowerCAmelCase_ = ", ".join(__a )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 122 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
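# Usage sketch: the defaults reproduce the base architecture, and the ONNX config
# exposes the expected input axes.
# cfg = Data2VecVisionConfig(image_size=384, use_relative_position_bias=True)
# onnx_cfg = Data2VecVisionOnnxConfig(cfg)
# onnx_cfg.inputs  # OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", ...})])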
| 704 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
UpperCAmelCase__ = field(
default="""cifar10""", metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={"""help""": """The column name of the images in the files."""} )
UpperCAmelCase__ = field(default=__UpperCamelCase, metadata={"""help""": """A folder containing the training data."""} )
UpperCAmelCase__ = field(default=__UpperCamelCase, metadata={"""help""": """A folder containing the validation data."""} )
UpperCAmelCase__ = field(
default=0.15, metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
    def __post_init__( self ) -> None:
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
}, )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
}, )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCAmelCase__ = field(
default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
UpperCAmelCase__ = field(default=__UpperCamelCase, metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
}, )
UpperCAmelCase__ = field(
default=0.75, metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
UpperCAmelCase__ = field(
default=__UpperCamelCase, metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class CustomTrainingArguments( TrainingArguments ):
UpperCAmelCase__ = field(
default=1E-3, metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn( examples ):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase__ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
lowerCamelCase__ : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase__ : List[Any] = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _UpperCAmelCase ) and data_args.train_val_split > 0.0:
lowerCamelCase__ : Optional[int] = ds['train'].train_test_split(data_args.train_val_split )
lowerCamelCase__ : List[str] = split['train']
lowerCamelCase__ : List[str] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ : Union[str, Any] = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase__ : Tuple = ViTMAEConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
lowerCamelCase__ : Tuple = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
lowerCamelCase__ : List[Any] = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase__ : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
lowerCamelCase__ : Any = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
lowerCamelCase__ : List[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCamelCase__ : Dict = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowerCamelCase__ : Dict = ViTMAEForPreTraining(_UpperCAmelCase )
if training_args.do_train:
lowerCamelCase__ : Union[str, Any] = ds['train'].column_names
else:
lowerCamelCase__ : Any = ds['validation'].column_names
if data_args.image_column_name is not None:
lowerCamelCase__ : str = data_args.image_column_name
elif "image" in column_names:
lowerCamelCase__ : Tuple = 'image'
elif "img" in column_names:
lowerCamelCase__ : int = 'img'
else:
lowerCamelCase__ : str = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase__ : List[Any] = image_processor.size['shortest_edge']
else:
lowerCamelCase__ : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
lowerCamelCase__ : Optional[Any] = Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCAmelCase ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowerCamelCase__ : Optional[int] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_UpperCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowerCamelCase__ : List[str] = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_UpperCAmelCase )
# Compute absolute learning rate
lowerCamelCase__ : Union[str, Any] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCamelCase__ : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowerCamelCase__ : str = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
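# Illustrative launch command for this script (flag names follow the argument
# dataclasses parsed above; dataset and output paths are placeholders):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4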
| 188 | 0 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
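# Quick sanity check of the multi-line `getattr` pattern handled above (illustrative):
#   >>> src = 'getattr(\n    self.config, "hidden_size", 768\n)'
#   >>> bool(re.search(r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"', src))
#   True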
def check_config_attributes_being_used(config_class):
    """Return the attributes of `config_class` that are not used in the corresponding modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
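# Illustrative use on a single configuration class (the result depends on the state of
# the repo; an empty list means every `__init__` argument is used somewhere):
#   >>> check_config_attributes_being_used(transformers.BertConfig)  # e.g. []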
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 185 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types):
    """Create dummy inputs matching the requested input types."""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    """Compute the types of the outputs."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
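# Illustrative: output_types(["Some text", torch.ones(3_000)]) returns ["text", "audio"].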
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 185 | 1 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
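# Illustrative behaviour of the helper above: with only `optuna` installed,
# default_hp_search_backend() returns "optuna"; `Trainer.hyperparameter_search()`
# falls back to this helper when no backend is passed explicitly.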
| 556 | 1 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
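# Illustrative: rename_key("patch_embed.proj.weight")
# -> "videomae.embeddings.patch_embeddings.projection.weight"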
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 4_00] )
_snake_case = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 1_74] )
_snake_case = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 14_08, 15_36] )
_snake_case = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 14_08, 15_36] )
_snake_case = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 14_08, 15_36] )
_snake_case = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 4_00] )
_snake_case = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 4_00] )
_snake_case = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 4_00] )
_snake_case = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 4_00] )
_snake_case = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 14_08, 15_36] )
_snake_case = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 1_74] )
_snake_case = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 14_08, 15_36] )
_snake_case = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 1_74] )
_snake_case = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
print('''Loss ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
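# Illustrative invocation (the script filename and paths are placeholders):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base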
| 224 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'

_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 224 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
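# Illustrative effect of the lazy module set up above: `from transformers import ASTConfig`
# resolves the configuration submodule only on first attribute access, keeping the
# top-level package import cheap.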
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
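# Minimal usage sketch (illustrative; the checkpoint name, `waveform` array and 48 kHz
# sampling rate are assumptions, not part of this file):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")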
| 667 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests over the files in `directory` that match `identifier` and don't match `n_identifier`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 94 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose a square matrix into a unit lower-triangular and an upper-triangular factor."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
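# Worked example for the Doolittle scheme above (illustrative): for
# table = [[4, 3], [6, 3]] the factors are lower = [[1, 0], [1.5, 1]] and
# upper = [[4, 3], [0, -1.5]], and lower @ upper reproduces the input.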
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
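# Illustrative invocation (argument names come from InitializationArguments; the
# script filename and values are placeholders):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model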
| 142 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[Dict[str, "jaxlib.xla_extension.Device"]] = None
class _SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase):
'''simple docstring'''
super().__init__(features=UpperCAmelCase)
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase):
raise ValueError(
f"""Expected {device} to be a `str` not {type(UpperCAmelCase)}, as `jaxlib.xla_extension.Device` """
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''')
__UpperCAmelCase =device if isinstance(UpperCAmelCase , UpperCAmelCase) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__UpperCAmelCase =self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
f"""device: {str(jax.devices()[0])}.""")
__UpperCAmelCase =str(jax.devices()[0])
__UpperCAmelCase =jnp_array_kwargs
@staticmethod
def A__ ():
'''simple docstring'''
import jax
return {str(UpperCAmelCase): device for device in jax.devices()}
def A__ (self , UpperCAmelCase):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase) and column:
if all(
isinstance(UpperCAmelCase , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(UpperCAmelCase , axis=0)
return column
def A__ (self , UpperCAmelCase):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase))):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__UpperCAmelCase ={}
if isinstance(UpperCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
__UpperCAmelCase ={'''dtype''': jnp.intaa}
else:
__UpperCAmelCase ={'''dtype''': jnp.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
__UpperCAmelCase ={'''dtype''': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image):
__UpperCAmelCase =np.asarray(UpperCAmelCase)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__UpperCAmelCase =self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs})
def A__ (self , UpperCAmelCase):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(UpperCAmelCase , '''__array__''') and not isinstance(UpperCAmelCase , jax.Array):
__UpperCAmelCase =data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase) for substruct in data_struct])
elif isinstance(UpperCAmelCase , (list, tuple)):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase) for substruct in data_struct])
return self._tensorize(UpperCAmelCase)
def A__ (self , UpperCAmelCase):
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase)
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.numpy_arrow_extractor().extract_row(UpperCAmelCase)
__UpperCAmelCase =self.python_features_decoder.decode_row(UpperCAmelCase)
return self.recursive_tensorize(UpperCAmelCase)
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =self.numpy_arrow_extractor().extract_column(UpperCAmelCase)
__UpperCAmelCase =self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0])
__UpperCAmelCase =self.recursive_tensorize(UpperCAmelCase)
__UpperCAmelCase =self._consolidate(UpperCAmelCase)
return column
    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
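# Usage sketch (added; not part of the extracted formatter). Assuming this is the
# `datasets` JaxFormatter, the public entry point is `with_format("jax")`:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]   # a jnp array, float32 by default (see _tensorize above)
#   ds["x"]      # rows of equal shape/dtype are stacked by _consolidate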
| 142 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by the argument parser to build the command from parsed args."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
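# Usage sketch (added): this command registers itself on the `transformers-cli`
# entry point, so a typical invocation is
#
#   transformers-cli download bert-base-uncased --cache-dir ~/models --force
#
# which simply pre-populates the cache via the AutoModel/AutoTokenizer calls above.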
| 54 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase__ : Union[str, Any] = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1], )
| 410 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed ring of doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")
class Node:
    """Ring node holding a payload and links to its neighbours."""

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
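# Usage sketch (added): the queue cycles through a fixed ring of
# `initial_capacity` nodes instead of allocating per element.
#
#   q = CircularQueueLinkedList(initial_capacity=3)
#   q.enqueue("a")
#   q.enqueue("b")
#   q.first()    # -> "a"
#   q.dequeue()  # -> "a"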
if __name__ == "__main__":
import doctest
doctest.testmod()
| 460 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # out of order: swap and step back to re-check the previous pair
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
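# Worked example (added): gnome sort steps back after each swap, so
#   gnome_sort([3, 1, 2])  ->  [1, 2, 3]
# using O(n^2) comparisons in the worst case and O(1) extra space.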
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 460 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5,
        is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size,
            mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
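# Note (added): the tester above is consumed by the test classes below through
# `self.model_tester = MaskFormerSwinModelTester(self)`; each `test_*` pulls a
# fresh (config, {"pixel_values": ...}) pair via prepare_config_and_inputs_for_common.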
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    # flag names below are reconstructed from the usual ModelTesterMixin switches
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :int ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[str] =model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[str] =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : int =[*signature.parameters.keys()]
__magic_name__ : List[str] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def A__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Any , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :Union[str, Any] , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Tuple =model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
__magic_name__ : List[Any] =model(**self._prepare_for_class(__snake_case , __snake_case ) )
__magic_name__ : Dict =outputs.hidden_states
__magic_name__ : Any =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# Swin has a different seq_length
__magic_name__ : str =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ : Tuple =True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Tuple =True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Union[str, Any] =3
__magic_name__ : str =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Any =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Tuple =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : List[Any] =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] =True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : List[Any] =True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def A__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__snake_case :Tuple ):
__magic_name__ : Union[str, Any] =0
return t
def check_equivalence(__snake_case :str , __snake_case :Optional[Any] , __snake_case :int , __snake_case :List[Any]={} ):
with torch.no_grad():
__magic_name__ : Optional[int] =model(**__snake_case , return_dict=__snake_case , **__snake_case )
__magic_name__ : List[str] =model(**__snake_case , return_dict=__snake_case , **__snake_case ).to_tuple()
def recursive_check(__snake_case :Optional[Any] , __snake_case :Tuple ):
if isinstance(__snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__snake_case , __snake_case ):
recursive_check(__snake_case , __snake_case )
elif isinstance(__snake_case , __snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__snake_case , __snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__snake_case ) , set_nan_tensor_to_zero(__snake_case ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}. Dict has"
f" `nan`: {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}."
) , )
recursive_check(__snake_case , __snake_case )
for model_class in self.all_model_classes:
__magic_name__ : Optional[int] =model_class(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Tuple =self._prepare_for_class(__snake_case , __snake_case )
__magic_name__ : int =self._prepare_for_class(__snake_case , __snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case )
__magic_name__ : Tuple =self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
__magic_name__ : str =self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case )
__magic_name__ : Optional[Any] =self._prepare_for_class(__snake_case , __snake_case )
__magic_name__ : Tuple =self._prepare_for_class(__snake_case , __snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} )
__magic_name__ : Union[str, Any] =self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
__magic_name__ : Tuple =self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 21 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            codebase_urls=["""https://github.com/jitsi/jiwer/"""],
            reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
                """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
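# Worked example (added): for reference "abc" vs prediction "axc" the character
# alignment gives S=1, D=0, I=0 with N=3 reference characters, so
# CER = (1 + 0 + 0) / 3 ≈ 0.33 -- matching the formula quoted in _DESCRIPTION.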
| 207 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over the predictions and write them to disk."""

    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(F"""{dataset_id}_eval_results.txt""", 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"""log_{dataset_id}_predictions.txt"""
        target_file = F"""log_{dataset_id}_targets.txt"""

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(F"""{i}""" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(F"""{i}""" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a target transcription before scoring."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
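# Example invocation (added; the dataset/model ids are placeholders):
#   python eval.py --model_id <your-model> --dataset mozilla-foundation/common_voice_8_0 \
#       --config de --split test --log_outputs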
| 610 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/pegasus-xsum""": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    """PEGASUS tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>",
        mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list)}, but is"""
                    f""" {type(additional_special_tokens)}""")

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token,
            mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
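    # Offset arithmetic (added note): SentencePiece ids are shifted up by
    # `offset` (103 by default) to make room for the special tokens kept in
    # `self.encoder`, e.g. sp id 2 maps to vocab id 105 (the <unk> token).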
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos at the end."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 610 | 1 |
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no queen attacks board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
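# Sanity check (added): for n = 8 the search below prints each of the
# 92 distinct solutions, so the final count should be 92.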
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 8 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def A_ ( self : int ):
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def A_ ( self : str ):
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
snake_case_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowercase_ , return_tensors='''np''' )
snake_case_ = processor(images=lowercase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''test'''
snake_case_ = processor(text=lowercase_ )
snake_case_ = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : Dict ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = '''test'''
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def A_ ( self : int ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.char_decode(lowercase_ )
snake_case_ = tokenizer.batch_decode(lowercase_ )
snake_case_ = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(lowercase_ , lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = None
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def A_ ( self : Dict ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
snake_case_ = torch.randn(1 , 27 , 38 )
snake_case_ = torch.randn(1 , 27 , 5_0257 )
snake_case_ = torch.randn(1 , 27 , 3_0522 )
snake_case_ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 640 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element through the logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
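# Worked example (added): sigmoid(0) = 0.5 and SiLU(x) = x * sigmoid(x), so
#   sigmoid_linear_unit(np.array([0.0, 1.0]))  ->  array([0.        , 0.73105858])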
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
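# Note (added): with the lazy module installed in sys.modules, an import such as
# `from transformers.models.clap import ClapModel` defers the heavy torch-backed
# modules until the attribute is first accessed.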
| 429 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = '''Speech2TextFeatureExtractor'''
    tokenizer_class = '''Speech2TextTokenizer'''

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__magic_name__ : Union[str, Any] = kwargs.pop("""raw_speech""" )
else:
__magic_name__ : Union[str, Any] = kwargs.pop("""audio""" , lowerCAmelCase__ )
__magic_name__ : Tuple = kwargs.pop("""sampling_rate""" , lowerCAmelCase__ )
__magic_name__ : Tuple = kwargs.pop("""text""" , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__magic_name__ : List[Any] = args[0]
__magic_name__ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__magic_name__ : Optional[Any] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
__magic_name__ : str = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__magic_name__ : Any = encodings["""input_ids"""]
return inputs
def __magic_name__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@contextmanager
def __magic_name__ ( self ) -> int:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__magic_name__ : int = True
__magic_name__ : Optional[int] = self.tokenizer
yield
__magic_name__ : Optional[int] = self.feature_extractor
__magic_name__ : Dict = False
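# Hedged usage sketch for the processor above (the checkpoint name is an assumption,
# not taken from this file):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript", return_tensors="pt")
# When both inputs are given, the combined-return branch of `__call__` yields the feature
# extractor's outputs plus `batch["labels"]` from the tokenizer.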
| 324 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
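# Why the two schedulers agree above: with identical betas, `add_noise` reduces to the same
# closed form sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise for both. Standalone sketch:
#   betas = torch.linspace(0.0001, 0.02, 1000)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   noisy = alphas_cumprod[t] ** 0.5 * x0 + (1 - alphas_cumprod[t]) ** 0.5 * noise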
| 324 | 1 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
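# Shape note for the mapper call above: `pooler_output` is (batch, hidden), and
# `latent_states[:, None]` inserts a length-1 sequence axis so the transformer
# blocks see (batch, 1, hidden), e.g. torch.randn(4, 768)[:, None].shape -> (4, 1, 768).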
| 709 |
"""simple docstring"""
_lowerCamelCase = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 401 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """
    Data collator that pads received inputs and computes the masked time indices for pretraining.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
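# Illustration of the flip/cumsum/flip trick above, which turns "1 at the last valid index"
# into a full prefix attention mask (standalone sketch, not part of the collator):
#   lengths = torch.tensor([3, 5])
#   mask = torch.zeros((2, 6), dtype=torch.long)
#   mask[(torch.arange(2), lengths - 1)] = 1
#   mask.flip([-1]).cumsum(-1).flip([-1]).bool()
#   -> [[True, True, True, False, False, False],
#       [True, True, True, True, True, False]]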
class WavaVecaPreTrainer(Trainer):
    """
    Subclassed `Trainer` that decays the gumbel softmax temperature after every update step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
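# Sketch of the temperature schedule applied above, with the defaults from ModelArguments:
#   max(2.0 * 0.999995 ** step, 0.5) is 2.0 at step 0, about 1.21 after 100k updates,
#   and clamps at the 0.5 floor around step 277k.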
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 33 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
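# For example, output_types(["hi", torch.ones(3000)]) returns ["text", "audio"], which is
# what the mixin compares against each tool's declared `outputs`.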
| 372 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
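# Minimal shape check for FlaxUpsample2D above (a sketch; random init, channels-last layout):
#   up = FlaxUpsample2D(out_channels=8)
#   x = jnp.zeros((1, 16, 16, 8))
#   y = up.apply(up.init(jax.random.PRNGKey(0), x), x)
#   y.shape -> (1, 32, 32, 8): the resize doubles H and W, the 3x3 same-padded conv keeps them.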
| 712 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
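# Background for the `init_noise_sigma` scaling used in the loops above (a sketch of the
# standard discrete-beta sigma schedule; the exact initial scale depends on scheduler config):
#   betas = torch.linspace(0.0001, 0.02, 1100)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5  # initial scale derives from sigmas.max()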
| 556 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, backed by sentencepiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id_): id_ for id_ in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
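# Layout produced by the special-token helpers above, for token_ids_0=[10, 11], token_ids_1=[20]:
#   input ids : [CLS] 10 11 [SEP] [SEP] 20 [SEP]
#   type ids  : 0 0 0 1 1 1 1   (len_0 + 1 zeros, then len_1 + 3 ones)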
| 240 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 240 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
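# Example invocation (script filename and paths are illustrative placeholders):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path /path/to/openai/ckpt \
#       --pytorch_dump_folder_path /path/to/output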
| 719 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
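# Worked trace for pigeon_sort([8, 3, 2, 7]): _min=2, _max=8, holes_range=7; the counting
# pass records one occurrence each at offsets 6, 1, 0 and 5, and the rebuild pass writes
# [2, 3, 7, 8] back into the array in order.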
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 582 | 0 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """
    Convert a roman numeral string to an integer, handling subtractive pairs.

    >>> parse_roman_numerals("XVI")
    16
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """
    Generate the minimal roman numeral for an integer.

    >>> generate_roman_numerals(16)
    'XVI'
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
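# Worked example of the round trip above: "XIIIIII" parses to 16, which regenerates as
# the minimal form "XVI", saving 7 - 3 = 4 characters for that line.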
| 538 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 538 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
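# Flatten/unflatten sketch for the collator above: 2 examples x 4 choices become 8 rows
# for `tokenizer.pad`, then `.view(batch_size, num_choices, -1)` restores the grouping:
#   features = [{"input_ids": [[1], [2], [3], [4]]}, {"input_ids": [[5], [6], [7], [8]]}]
#   flat = list(chain(*[[{k: v[i] for k, v in f.items()} for i in range(4)] for f in features]))
#   len(flat) -> 8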
def lowerCAmelCase__ ( ) -> List[Any]:
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
    model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
    max_seq_length = tokenizer.model_max_length
    if max_seq_length > 1024:
        logger.warning(
            "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
            " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
            " override this default with `--block_size xxx`."
        )
        max_seq_length = 1024
else:
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
    # Repeat each context four times so it can be paired with each of the four candidate endings.
    first_sentences = [[context] * 4 for context in examples[context_name]]
    question_headers = examples[question_header_name]
    second_sentences = [
        [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
    ]
    # Flatten out
    first_sentences = list(chain(*first_sentences))
    second_sentences = list(chain(*second_sentences))
    # Tokenize
    tokenized_examples = tokenizer(
        first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False,
    )
    # Un-flatten: regroup the flat encodings back into lists of 4 choices per example.
    return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
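# A minimal sketch of the shape transformation above (hypothetical values, for
# illustration only): with 2 examples and 4 candidate endings, the tokenizer
# receives 8 flattened (context, ending) pairs, and the return statement
# regroups them so that tokenized["input_ids"] has shape [2][4][seq_length].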
if training_args.do_train:
    if "train" not in raw_datasets:
        raise ValueError("--do_train requires a train dataset")
    train_dataset = raw_datasets["train"]
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    with training_args.main_process_first(desc="train dataset map pre-processing"):
        train_dataset = train_dataset.map(
            preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
        )
if training_args.do_eval:
    if "validation" not in raw_datasets:
        raise ValueError("--do_eval requires a validation dataset")
    eval_dataset = raw_datasets["validation"]
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))
    with training_args.main_process_first(desc="validation dataset map pre-processing"):
        eval_dataset = eval_dataset.map(
            preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache,
        )
# Data collator
data_collator = (
    default_data_collator
    if data_args.pad_to_max_length
    else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
    predictions, label_ids = eval_predictions
    preds = np.argmax(predictions, axis=1)
    return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    trainer.save_model()  # Saves the tokenizer too for easy upload
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics["train_samples"] = min(max_train_samples, len(train_dataset))
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info("*** Evaluate ***")
    metrics = trainer.evaluate()
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
kwargs = {
    "finetuned_from": model_args.model_name_or_path,
    "tasks": "multiple-choice",
    "dataset_tags": "swag",
    "dataset_args": "regular",
    "dataset": "SWAG",
    "language": "en",
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process simply runs main().
    main()


if __name__ == "__main__":
    main()
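
# Example invocation (hypothetical paths and model name; adjust to your setup.
# The flags map to the ModelArguments/DataTrainingArguments/TrainingArguments
# parsed at the top of main()):
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output \
#     --overwrite_output_dir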
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # Compare the actual values for a slice of the output.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # Reference values were computed with the original fairseq CamemBERT:
        #   camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        #   camembert.eval()
        #   expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
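# This integration test is gated behind `@slow`; in the transformers test suite
# such tests are typically enabled via the RUN_SLOW environment variable, e.g.
# (hypothetical file path):
#   RUN_SLOW=1 python -m pytest tests/test_modeling_tf_camembert.py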