'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
        model = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
        config = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def extract(*_UpperCAmelCase , **_UpperCAmelCase):
class SCREAMING_SNAKE_CASE :
def __init__( self):
'''simple docstring'''
__A : Optional[int] = torch.ones([0])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
self.pixel_values.to(_UpperCAmelCase)
return self
return Out()
return extract
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : int = self.dummy_cond_unet
__A : List[str] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase)
__A : str = self.dummy_vae
__A : str = self.dummy_text_encoder
__A : int = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
__A : Union[str, Any] = 77
__A : Union[str, Any] = self.dummy_image.to(_UpperCAmelCase)
__A : Tuple = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__A : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase)
__A : int = alt_pipe.to(_UpperCAmelCase)
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Dict = 'A painting of a squirrel eating a burger'
__A : Optional[Any] = torch.Generator(device=_UpperCAmelCase).manual_seed(0)
__A : Tuple = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , )
__A : Union[str, Any] = output.images
__A : List[Any] = torch.Generator(device=_UpperCAmelCase).manual_seed(0)
__A : Dict = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
__A : Optional[int] = image[0, -3:, -3:, -1]
__A : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__A : int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.dummy_cond_unet
__A : Dict = PNDMScheduler(skip_prk_steps=_UpperCAmelCase)
__A : Optional[int] = self.dummy_vae
__A : Union[str, Any] = self.dummy_text_encoder
__A : Tuple = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
__A : Optional[Any] = 77
__A : Any = self.dummy_image.to(_UpperCAmelCase)
# put models in fp16
__A : Optional[Any] = unet.half()
__A : Optional[int] = vae.half()
__A : str = bert.half()
# make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__A : Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase)
__A : List[Any] = alt_pipe.to(_UpperCAmelCase)
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Dict = 'A painting of a squirrel eating a burger'
__A : Tuple = torch.manual_seed(0)
__A : List[str] = alt_pipe(
[prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
# resize to resolution that is divisible by 8 but not 16 or 32
__A : int = init_image.resize((760, 504))
__A : List[str] = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
pipe.enable_attention_slicing()
__A : Any = 'A fantasy landscape, trending on artstation'
__A : List[str] = torch.manual_seed(0)
__A : Any = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type='np' , )
__A : Tuple = output.images[0]
__A : List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__A : List[str] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
__A : List[str] = init_image.resize((768, 512))
__A : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
__A : Tuple = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
pipe.enable_attention_slicing()
__A : Optional[int] = 'A fantasy landscape, trending on artstation'
__A : List[str] = torch.manual_seed(0)
__A : Optional[Any] = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type='np' , )
__A : Any = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
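# Illustrative example (not in the original file): the returned key wrapper
# lowercases and drops underscores, so ignore_underscore(lambda x: x)("_Foo") -> "foo".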
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
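# Illustrative example (not in the original file): constants first, then classes,
# then functions, each group sorted case/underscore-insensitively:
#   sort_objects(["foo", "Bar", "BAZ", "_qux"]) -> ["BAZ", "Bar", "foo", "_qux"]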
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
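# Illustrative example (not in the original file):
#   sort_objects_in_import('_import_structure["models"] = ["c", "a", "b"]')
#   -> '_import_structure["models"] = ["a", "b", "c"]'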
def sort_imports(file: str, check_only: bool = True):
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
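# Worked examples (not in the original file):
#   perfect(6) -> True (1 + 2 + 3 == 6)
#   perfect(28) -> True (1 + 2 + 4 + 7 + 14 == 28)
#   perfect(12) -> False (proper divisors sum to 16)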
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
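# Illustrative trace (not in the original file):
#   rename_state_dict_key('encoder.attention.q_lin.weight')
#   -> 'encoder.self_attn.q_proj.weight'
# ('attention' -> 'attn' and 'q_lin' -> 'q_proj' via PATTERNS, then the
#  encoder-specific '.attn' -> '.self_attn' rewrite)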
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase__ : List[str] = TypeVar('''T''')
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True):
        '''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self):
'''simple docstring'''
        return pformat(self.adj_list)
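# Illustrative usage sketch (not in the original file), using the names
# reconstructed above:
#   g = GraphAdjacencyList[int](directed=False)
#   g.add_edge(0, 1).add_edge(1, 2)
#   print(g)  # {0: [1], 1: [0, 2], 2: [1]}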
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        '''simple docstring'''
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        '''simple docstring'''
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        '''simple docstring'''
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
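# Illustrative usage sketch (not in the original file; coefficients hand-checked):
#   x = FFT(poly_a=[1, 2, 3], poly_b=[3, 4])   # (1 + 2x + 3x^2) * (3 + 4x)
#   x.product -> coefficients of 3 + 10x + 17x^2 + 12x^3 (as complex numbers)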
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
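# Hand-checked values for this recurrence (not in the original file):
#   solution(2) == 2, solution(3) == 4, solution(5) == 15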
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
        self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _UpperCAmelCase=512 + 1 , _UpperCAmelCase=32 * 32 , _UpperCAmelCase=512 , _UpperCAmelCase=24 , _UpperCAmelCase=8 , _UpperCAmelCase=None , _UpperCAmelCase="quick_gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
__A : str = vocab_size
__A : List[Any] = n_positions
__A : List[Any] = n_embd
__A : Dict = n_layer
__A : str = n_head
__A : List[str] = n_inner
__A : Optional[Any] = activation_function
__A : Optional[Any] = resid_pdrop
__A : str = embd_pdrop
__A : Tuple = attn_pdrop
__A : str = layer_norm_epsilon
__A : Union[str, Any] = initializer_range
__A : Union[str, Any] = scale_attn_weights
__A : List[str] = use_cache
__A : int = scale_attn_by_inverse_layer_idx
__A : Optional[Any] = reorder_and_upcast_attn
__A : Optional[int] = tie_word_embeddings
super().__init__(tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase)
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 3 , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , ):
'''simple docstring'''
__A : List[str] = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = dict(preprocessor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase))
        return inputs
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
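    # Illustrative layout (not in the original file): like RoBERTa, a pair of
    # sequences is formatted as `<s> A </s></s> B </s>` and a single sequence
    # as `<s> A </s>`.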
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
        return (out_vocab_file,)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''swinv2'''
lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=96 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[3, 6, 12, 24] , _UpperCAmelCase=7 , _UpperCAmelCase=4.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=32 , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : Dict = image_size
__A : Optional[int] = patch_size
__A : int = num_channels
__A : Tuple = embed_dim
__A : Dict = depths
__A : str = len(_UpperCAmelCase)
__A : int = num_heads
__A : Optional[int] = window_size
__A : int = mlp_ratio
__A : Optional[Any] = qkv_bias
__A : Dict = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Any = drop_path_rate
__A : List[Any] = hidden_act
__A : Optional[Any] = use_absolute_embeddings
__A : List[Any] = layer_norm_eps
__A : Union[str, Any] = initializer_range
__A : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
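# Illustrative check (not in the original file): with the defaults above
# (embed_dim=96 and depths of length 4), hidden_size = int(96 * 2**3) = 768.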
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
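# Quick usage sketch for the two helpers above (shapes and vocab size are illustrative):
#
#   ids = ids_tensor((2, 5), vocab_size=99)   # int32 array of shape (2, 5), values in [0, 98]
#   mask = random_attention_mask((2, 5))      # 0/1 mask whose last column is forced to 1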
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
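# A hedged sketch of how the mixin above is typically consumed (the class names in
# this comment are illustrative, not taken from this file): a concrete test case
# inherits the mixin together with unittest.TestCase and supplies `model_tester`
# and `all_generative_model_classes`.
#
#   class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#       all_generative_model_classes = (FlaxGPT2LMHeadModel,)
#
#       def setUp(self):
#           self.model_tester = FlaxGPT2ModelTester(self)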
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and its tokenizer) with the config of `config_name` to `save_dir`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
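# Example invocation via fire (the script filename here is illustrative, not from this file):
#
#   python save_randomly_initialized_version.py t5-small ./t5-small-random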
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
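# With n = 4 and k = 2, the demo above prints the six 2-element combinations of
# {1, 2, 3, 4}, one per line: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".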
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
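# A minimal usage sketch (label values are illustrative): string keys in
# `aggregation_labels` are normalized to ints by the branch above.
#
#   config = TapasConfig(
#       num_aggregation_labels=4,
#       aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"},
#   )
#   assert config.aggregation_labels[3] == "COUNT"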
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor, one layer per log line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
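# Worked example for `entropy` (values are illustrative): a uniform distribution
# over four outcomes has entropy ln(4) ≈ 1.3863.
#
#   p = torch.full((1, 4), 0.25)
#   entropy(p)  # tensor([1.3863])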
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Computes head attention entropy and head importance scores (loss gradients w.r.t. the head mask)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Masks heads (sets them to zero) in increasing order of importance until the score drops
    below the masking threshold, as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prunes (actually removes) the masked heads and measures the effect on score and speed."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
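# Worked example for the box helpers above (numbers are illustrative):
#
#   boxes = torch.tensor([[-5.0, 3.0, 120.0, 90.0]])
#   _clip_box(boxes, (80, 100))  # clamps in place: x into [0, 100], y into [0, 80]
#   # boxes is now [[0.0, 3.0, 100.0, 80.0]]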
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
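# For the adjacency list above, the call prints the articulation points 2, 3 and 5:
# removing vertex 2 separates {0, 1} from the rest, removing 3 isolates 4, and
# removing 5 disconnects {6, 7, 8}.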
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""") | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0])
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            input_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward(self, sample, timestep, return_dict: bool = True):
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample) | 8 | 1 |
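# A minimal smoke test for the model above; sizes are illustrative and the
# output shape should match the input (a sketch, not part of the library tests):
# model = UNet1DModel(block_out_channels=(32, 32, 64))
# sample = torch.randn(1, 2, 256)                     # (batch, in_channels, length)
# out = model(sample, timestep=torch.tensor([10])).sample
# assert out.shape == sample.shape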
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 0
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Union[str, Any] = Path(_UpperCAmelCase) / 'preprocessor_config.json'
__A : Dict = Path(_UpperCAmelCase) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w') , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w'))
__A : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Tuple = Path(_UpperCAmelCase) / 'preprocessor_config.json'
__A : Tuple = Path(_UpperCAmelCase) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w') , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w'))
__A : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Optional[int] = CLIPConfig()
            # Create a dummy config file with image_processor_type
__A : Union[str, Any] = Path(_UpperCAmelCase) / 'preprocessor_config.json'
__A : List[Any] = Path(_UpperCAmelCase) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w') , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__A : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase).to_dict()
config_dict.pop('image_processor_type')
__A : List[Any] = CLIPImageProcessor(**_UpperCAmelCase)
# save in new folder
model_config.save_pretrained(_UpperCAmelCase)
config.save_pretrained(_UpperCAmelCase)
__A : str = AutoImageProcessor.from_pretrained(_UpperCAmelCase)
# make sure private variable is not incorrectly saved
__A : List[str] = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Dict = Path(_UpperCAmelCase) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w') , )
__A : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'clip-base is not a local folder and is not a valid model identifier'):
__A : Optional[Any] = AutoImageProcessor.from_pretrained('clip-base')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__A : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase , revision='aaaaaa')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__A : Dict = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
__A : int = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase):
__A : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase)
__A : Union[str, Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase)
__A : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Optional[int] = Path(_UpperCAmelCase) / 'preprocessor_config.json'
__A : Optional[Any] = Path(_UpperCAmelCase) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w') , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w'))
__A : Union[str, Any] = CustomImageProcessor.from_pretrained(_UpperCAmelCase)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase)
__A : List[str] = AutoImageProcessor.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = True
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase)
# If remote code is not set, the default is to use local
__A : Dict = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__A : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__A : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(_UpperCAmelCase , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] | 8 |
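# The register/round-trip pattern these tests exercise, reduced to a sketch
# (`CustomConfig` and `CustomImageProcessor` are the fixtures imported above):
# AutoConfig.register('custom', CustomConfig)
# AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
# with tempfile.TemporaryDirectory() as tmp_dir:
#     CustomImageProcessor().save_pretrained(tmp_dir)
#     reloaded = AutoImageProcessor.from_pretrained(tmp_dir)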
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
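# Quick sanity checks for hamming_distance (classic textbook values):
assert hamming_distance('karolin', 'kathrin') == 3
assert hamming_distance('2173896', '2233796') == 3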
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 |
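# The integration check above as a standalone snippet (requires Hub access;
# the expected shape comes from the assertions in the test):
# model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
# hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
# print(hidden.shape)   # (1, 6, 768)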
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as lists of coefficients, lowest power first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]
        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    def __str__(self):
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
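# Worked example: (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
fft_demo = FFT([1, 2, 3], [4, 5])
print(fft_demo.product)   # [(4+0j), (13+0j), (22+0j), (15+0j)]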
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict(self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
return d | 8 | 1 |
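# The fields above mirror `Seq2SeqTrainingArguments` from `transformers`; a
# hedged sketch of typical use (values are illustrative, not from this file):
# from transformers import Seq2SeqTrainingArguments
# args = Seq2SeqTrainingArguments(output_dir='out', predict_with_generate=True,
#                                 generation_max_length=128, generation_num_beams=4)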
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:  # manhattan distance
            return abs(dx) + abs(dy)
        else:  # euclidean distance
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"""AStar execution time = {end_time:f} seconds""")
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""") | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 1 |
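# For reference, this mirrors `LxmertConfig` from `transformers`; a hedged
# sketch of constructing it (parameter values are illustrative):
# from transformers import LxmertConfig
# config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048)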
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
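# Worked example: items 0 and 1 are taken whole, then 2/3 of item 2, giving
# the classic optimum of 240 for capacity 50.
max_val, fracs = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_val)   # 240.0
print(fracs)     # [1, 1, 0.6666666666666666]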
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
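# By Lagrange's four-square theorem the answer never exceeds 4.
assert minimum_squares_to_represent_a_number(26) == 2   # 26 = 25 + 1
assert minimum_squares_to_represent_a_number(7) == 4    # 7 = 4 + 1 + 1 + 1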
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[8, 16, 32, 64] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=1 , ):
'''simple docstring'''
__A : int = parent
__A : List[str] = batch_size
__A : Dict = image_size
__A : Dict = num_channels
__A : List[str] = embeddings_size
__A : Optional[int] = hidden_sizes
__A : List[str] = depths
__A : List[str] = is_training
__A : Union[str, Any] = use_labels
__A : Dict = hidden_act
__A : Tuple = num_labels
__A : int = scope
__A : Optional[int] = len(_UpperCAmelCase)
__A : Tuple = out_features
__A : List[Any] = out_indices
__A : Optional[Any] = num_groups
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : Tuple = None
if self.use_labels:
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels)
__A : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = BitModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = self.num_labels
__A : Optional[int] = BitForImageClassification(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = BitBackbone(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : int = model(_UpperCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
__A : List[Any] = None
__A : Optional[int] = BitBackbone(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Optional[Any] = model(_UpperCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.prepare_config_and_inputs()
__A ,__A ,__A : List[str] = config_and_inputs
__A : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = BitModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return
@unittest.skip(reason='Bit does not output attentions')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Dict = model_class(_UpperCAmelCase)
__A : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : List[Any] = [*signature.parameters.keys()]
__A : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Any = model_class(config=_UpperCAmelCase)
for name, module in model.named_modules():
            if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Union[str, Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Tuple = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__A : Union[str, Any] = layer_type
__A : str = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Union[str, Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
@unittest.skip(reason='Bit does not use feedforward chunking')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : int = BitModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(_UpperCAmelCase)
__A : str = self.default_image_processor
__A : List[str] = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : Union[str, Any] = model(**_UpperCAmelCase)
# verify the logits
__A : Tuple = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Any = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4))
@require_torch
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase = BitConfig
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = BitModelTester(self) | 8 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase__ : List[Any] = logging.getLogger()
def _dump_articles(path: Path, articles: list) -> None:
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
lowercase__ : Optional[Any] = '''patrickvonplaten/t5-tiny-random'''
lowercase__ : List[Any] = '''sshleifer/bart-tiny-random'''
lowercase__ : Optional[Any] = '''sshleifer/tiny-mbart'''
lowercase__ : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
__A : Optional[Any] = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__A : Optional[int] = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
__A : Dict = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__A : Any = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase):
run_generate()
assert Path(_UpperCAmelCase).exists()
# os.remove(Path(output_file_name))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.run_eval_tester(_UpperCAmelCase)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
self.run_eval_tester(_UpperCAmelCase)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
__A : str = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__A : int = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__A : Dict = Path(self.get_auto_remove_tmp_dir())
__A : str = str(tmp_dir / 'scores.json')
__A : int = str(tmp_dir / 'val.target')
_dump_articles(_UpperCAmelCase , text['en'])
_dump_articles(_UpperCAmelCase , text['de'])
__A : Optional[int] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__A : int = F'\n run_eval_search.py\n {model}\n {str(_UpperCAmelCase)}\n {str(_UpperCAmelCase)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase):
with CaptureStdout() as cs:
run_search()
__A : str = [' num_beams | length_penalty', model, 'Best score args']
__A : List[Any] = ['Info']
if "translation" in task:
expected_strings.append('bleu')
else:
expected_strings.extend(_UpperCAmelCase)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_UpperCAmelCase).exists()
os.remove(Path(_UpperCAmelCase)) | 8 |
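# The argv-patching pattern these tests rely on, in isolation (a sketch; the
# model id is one of the tiny checkpoints defined above, file names are made up):
# testargs = 'run_eval.py sshleifer/bart-tiny-random in.source out.txt'.split()
# with patch.object(sys, 'argv', testargs):
#     run_generate()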
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx):
        return idx * 2
    def right(self, idx):
        return idx * 2 + 1
    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx, left_element, right_element, a, b, val):
        # Assign val to every position in [a, b]; push any pending lazy value down first.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx, left_element, right_element, a, b):
        # Maximum over [a, b]; push any pending lazy value down first.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 8 | 1 |
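A brief usage sketch to complement the driver above (hypothetical array; names follow the reconstruction): pending range assignments are parked in `lazy`/`flag` and pushed one level down only when a later `update` or `query` visits the node, which keeps both operations at O(log n).

tree = SegmentTree(5)
tree.build(1, 1, 5, [3, 1, 4, 1, 5])
tree.update(1, 1, 5, 2, 4, 9)     # lazily assign 9 to positions 2..4
print(tree.query(1, 1, 5, 1, 5))  # -> 9, the new range maximum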
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'
    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = 'toto'
    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
__A : List[Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
__A : Union[str, Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , _UpperCAmelCase) and yy.get('choices' , _UpperCAmelCase):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](_UpperCAmelCase) , yy['type'](_UpperCAmelCase))
del xx["type"], yy["type"]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : int = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
self.assertFalse(example.flag)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz')
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
__A : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Tuple = parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[Any] = parser.parse_args(['--foo', '--baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[int] = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : List[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[Any] = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
__A : Union[str, Any] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : List[Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
__A : Dict = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
__A : Tuple = parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = "toto"
__A : str = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[int] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : Optional[Any] = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
__A : Optional[int] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message')
expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase)
__A : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Dict = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[]))
__A : str = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__A : str = parser.parse_dict(_UpperCAmelCase)[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = HfArgumentParser(_UpperCAmelCase)
__A : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[Any] = os.path.join(_UpperCAmelCase , 'temp_json')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
                __A : Union[str, Any] = parser.parse_json_file(Path(temp_local_path + '.json'))[0]
__A : str = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[str] = os.path.join(_UpperCAmelCase , 'temp_yaml')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase) | 8 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of the first num_of_terms terms of an arithmetic progression.
    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
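For reference, the closed form implemented above follows from pairing the first and last terms of the progression:

S_n = \sum_{k=0}^{n-1} (a + kd) = \frac{n}{2}\left(2a + (n - 1)d\right)

with a = first_term, d = common_diff and n = num_of_terms; e.g. n = 10, a = d = 1 gives (10/2) * 11 = 55.0, matching the doctest.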
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : str = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''trocr'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, ) | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (a__ ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"] | 8 |
'''simple docstring'''
import argparse
import os
import re
# Root of the package whose __init__.py files get sorted.
PATH_TO_DIFFUSERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
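A minimal illustration of the ordering rule implemented by sort_objects (function names per the reconstruction above; the input list is made up):

print(sort_objects(['my_function', 'MyClass', 'MY_CONSTANT', 'another_fn']))
# -> ['MY_CONSTANT', 'MyClass', 'another_fn', 'my_function']
# Constants (all uppercase) come first, then classes, then functions,
# each group sorted case-insensitively with underscores ignored.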
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesised infix expression with Dijkstra's two-stack algorithm.
    RULE 1: push operands on the operand stack.
    RULE 2: push operators on the operator stack.
    RULE 3: ignore left parentheses.
    RULE 4: on a right parenthesis, pop one operator and two operands, apply, push the result.
    RULE 5: when the input is exhausted, the result is on top of the operand stack.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 8 |
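A quick sanity check (assuming the fixed function name above, and run inside the package so the relative `stack` import resolves): the parser handles single-digit operands in fully parenthesised expressions.

# (4 * 2) -> 8, (2 + 3) -> 5, (8 * 5) -> 40, (5 + 40) -> 45
assert dijkstras_two_stack_algorithm('(5 + ((4 * 2) * (2 + 3)))') == 45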
'''simple docstring'''
def perfect(number: int) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('Enter number: ').strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") | 8 | 1 |
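A few worked checks of the definition above (a number is perfect when its proper divisors sum back to it):

assert perfect(6)       # 1 + 2 + 3 == 6
assert perfect(28)      # 1 + 2 + 4 + 7 + 14 == 28
assert not perfect(12)  # 1 + 2 + 3 + 4 + 6 == 16 != 12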
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
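For context, the loop bounds above enumerate the three non-square corners of each layer of a number spiral: for the layer built on j, the corners are j*j + (j+1), j*j + 2*(j+1) and j*j + 3*(j+1), while the fourth corner (j+2)**2 is a perfect square and never prime. With the default ratio of 0.1 this is the setup of Project Euler problem 58. A small, unasserted example (the function name `solution` is this edit's choice):

print(solution(0.5))  # side length at which primes first make up less than half the diagonal values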
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
__A : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(_UpperCAmelCase)
return image
@property
    def dummy_cond_unet_upscale(self):
'''simple docstring'''
torch.manual_seed(0)
__A : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae(self):
'''simple docstring'''
torch.manual_seed(0)
__A : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0)
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Tuple = self.dummy_cond_unet_upscale
__A : Optional[Any] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='v_prediction')
__A : int = self.dummy_vae
__A : Tuple = self.dummy_text_encoder
__A : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__A : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __A : Tuple = Image.fromarray(np.uint8(_UpperCAmelCase)).convert('RGB').resize((64, 64))
# make sure here that pndm scheduler skips prk
__A : Dict = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
__A : Dict = sd_pipe.to(_UpperCAmelCase)
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Dict = 'A painting of a squirrel eating a burger'
__A : Optional[int] = torch.Generator(device=_UpperCAmelCase).manual_seed(0)
__A : Dict = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
__A : Tuple = output.images
__A : Dict = torch.Generator(device=_UpperCAmelCase).manual_seed(0)
__A : str = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=_UpperCAmelCase , )[0]
__A : Union[str, Any] = image[0, -3:, -3:, -1]
__A : List[Any] = image_from_tuple[0, -3:, -3:, -1]
__A : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__A : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Optional[Any] = self.dummy_cond_unet_upscale
__A : Any = DDPMScheduler()
__A : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction')
__A : List[str] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__A : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __A : List[Any] = Image.fromarray(np.uint8(_UpperCAmelCase)).convert('RGB').resize((64, 64))
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(_UpperCAmelCase)
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Optional[Any] = 'A painting of a squirrel eating a burger'
__A : List[str] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
__A : int = output.images
assert image.shape[0] == 2
__A : int = torch.Generator(device=_UpperCAmelCase).manual_seed(0)
__A : int = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
__A : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.dummy_cond_unet_upscale
__A : Any = DDPMScheduler()
__A : Optional[Any] = DDIMScheduler(prediction_type='v_prediction')
__A : Any = self.dummy_vae
__A : Union[str, Any] = self.dummy_text_encoder
__A : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__A : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __A : Union[str, Any] = Image.fromarray(np.uint8(_UpperCAmelCase)).convert('RGB').resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
__A : Optional[Any] = unet.half()
__A : str = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(_UpperCAmelCase)
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = 'A painting of a squirrel eating a burger'
__A : int = torch.manual_seed(0)
__A : Optional[Any] = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , ).images
__A : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png')
__A : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy')
__A : Optional[Any] = 'stabilityai/stable-diffusion-x4-upscaler'
__A : Any = StableDiffusionUpscalePipeline.from_pretrained(_UpperCAmelCase)
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
pipe.enable_attention_slicing()
__A : List[Any] = 'a cat sitting on a park bench'
__A : Optional[int] = torch.manual_seed(0)
__A : Any = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='np' , )
__A : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png')
__A : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy')
__A : Any = 'stabilityai/stable-diffusion-x4-upscaler'
__A : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
pipe.enable_attention_slicing()
__A : List[str] = 'a cat sitting on a park bench'
__A : Tuple = torch.manual_seed(0)
__A : int = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='np' , )
__A : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png')
__A : Any = 'stabilityai/stable-diffusion-x4-upscaler'
__A : Dict = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__A : Optional[int] = 'a cat sitting on a park bench'
__A : Dict = torch.manual_seed(0)
__A : Tuple = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , output_type='np' , )
__A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 8 |
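# Hedged sketch of the x4 upscale shape contract the tests above assert
# (the helper name is illustrative, not part of the pipeline API):
def expected_upscaled_shape(low_res_size, scale=4):
    width, height = low_res_size  # PIL's Image.size is (width, height)
    return (height * scale, width * scale, 3)  # (H, W, C) of the np output

assert expected_upscaled_shape((64, 64)) == (256, 256, 3)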
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
        # A primitive complex root of unity used for the Fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
        # Iterative FFT: merge row pairs (i, i + next_ncol), halving next_ncol each pass
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
        # Corner case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
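# Cross-check of the coefficient product computed by the class above: for
# polynomial multiplication, np.convolve on the coefficient lists (lowest
# degree first) is the ground truth. Masked names such as `np.loga` stand
# for np.log2 in the original implementation.
import numpy as np
assert list(np.convolve([1, 2, 3], [1, 2])) == [1, 4, 7, 6]  # (1+2x+3x^2)(1+2x)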
'''simple docstring'''
import random
def _lowerCAmelCase ( __snake_case : int ) -> bool:
__A : Tuple = num - 1
__A : Optional[Any] = 0
while s % 2 == 0:
__A : Optional[int] = s // 2
t += 1
for _ in range(5 ):
__A : List[str] = random.randrange(2 , num - 1 )
__A : str = pow(__snake_case , __snake_case , __snake_case )
if v != 1:
__A : Optional[int] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__A : Optional[int] = i + 1
__A : Optional[int] = (v**2) % num
return True
def _lowerCAmelCase ( __snake_case : int ) -> bool:
if num < 2:
return False
__A : Optional[int] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__snake_case )
def _lowerCAmelCase ( __snake_case : int = 10_24 ) -> int:
while True:
__A : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(__snake_case ):
return num
if __name__ == "__main__":
lowercase__ : List[str] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num))) | 8 |
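# A small sketch of the decomposition num - 1 = 2**t * s that rabin_miller
# performs above (the helper name here is illustrative):
def decompose(num: int) -> tuple[int, int]:
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    return s, t

assert decompose(13) == (3, 2)  # 12 == 2**2 * 3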
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
            # check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
            # Check that attentions are returned last and that their order is preserved
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
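# Arithmetic behind expected_seq_len in the tester above: image_size (30, 30)
# with patch_size 2 gives (30 // 2) * (30 // 2) = 225 patches, plus 1 CLS token
# and 10 detection tokens:
assert (30 // 2) * (30 // 2) + 1 + 10 == 236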
'''simple docstring'''
import re
def _lowerCAmelCase ( __snake_case : str ) -> bool:
__A : Dict = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
if match := re.search(__snake_case , __snake_case ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 8 |
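# Behaviour sketch of the regex above (sample numbers are illustrative):
# optional +91 prefix, optional 0 or 91, then a 10-digit number whose first
# digit must be 7, 8 or 9.
import re
pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
assert pattern.search("+918827897895") is not None
assert pattern.search("+911234567890") is None  # subscriber number starts with 1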
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 1 |
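# Layout sketch of build_inputs_with_special_tokens above for a sentence pair:
# <s> A </s></s> B </s>. Token ids here are placeholders, not the real vocab.
cls_id, sep_id = 5, 6
ids_a, ids_b = [10, 11], [12]
assert [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id] == [5, 10, 11, 6, 6, 12, 6]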
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''informer'''
lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "student_t" , _UpperCAmelCase = "nll" , _UpperCAmelCase = 1 , _UpperCAmelCase = None , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 64 , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = True , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.05 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 100 , _UpperCAmelCase = 0.02 , _UpperCAmelCase=True , _UpperCAmelCase = "prob" , _UpperCAmelCase = 5 , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : str = prediction_length
__A : str = context_length or prediction_length
__A : int = distribution_output
__A : int = loss
__A : Optional[Any] = input_size
__A : Optional[int] = num_time_features
__A : List[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__A : int = scaling
__A : int = num_dynamic_real_features
__A : Tuple = num_static_real_features
__A : Optional[Any] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCAmelCase) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
__A : Tuple = cardinality
else:
__A : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCAmelCase) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
__A : Union[str, Any] = embedding_dimension
else:
__A : Optional[Any] = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
__A : List[Any] = num_parallel_samples
# Transformer architecture configuration
__A : int = input_size * len(self.lags_sequence) + self._number_of_features
__A : Union[str, Any] = d_model
__A : str = encoder_attention_heads
__A : str = decoder_attention_heads
__A : str = encoder_ffn_dim
__A : List[Any] = decoder_ffn_dim
__A : str = encoder_layers
__A : Optional[Any] = decoder_layers
__A : Dict = dropout
__A : List[Any] = attention_dropout
__A : int = activation_dropout
__A : List[str] = encoder_layerdrop
__A : Union[str, Any] = decoder_layerdrop
__A : str = activation_function
__A : str = init_std
__A : str = use_cache
# Informer
__A : Union[str, Any] = attention_type
__A : int = sampling_factor
__A : Union[str, Any] = distil
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 8 |
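# Worked example of _number_of_features above (values are illustrative): with
# cardinality [10] the default embedding dimension is [min(50, (10 + 1) // 2)] = [5];
# with 3 time features, no extra dynamic/static real features and input_size 1,
# the total is 5 + 0 + 3 + 0 + 1 * 2 = 10.
assert sum([min(50, (10 + 1) // 2)]) + 0 + 3 + 0 + 1 * 2 == 10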
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
            # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
        # The name is the cached name, which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
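# Path sketch of the cache snapshot layout asserted in the first test above
# (names are illustrative, not the hub implementation itself):
import os
def snapshot_file(cache_dir, commit_sha, filename):
    # cached files live under <cache>/snapshots/<commit_sha>/<filename>
    return os.path.join(cache_dir, "snapshots", commit_sha, filename)

assert snapshot_file("cache", "abc123", "config.json") == os.path.join("cache", "snapshots", "abc123", "config.json")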
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowercase__ : int = (3, 9, -11, 0, 7, 5, 1, -1)
lowercase__ : List[str] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : Node | None = None
for i in sorted(_UpperCAmelCase , reverse=_UpperCAmelCase):
__A : Dict = Node(_UpperCAmelCase , self.head)
def __iter__( self):
'''simple docstring'''
__A : Dict = self.head
while node:
yield node.data
__A : Dict = node.next_node
def __len__( self):
'''simple docstring'''
return sum(1 for _ in self)
def __str__( self):
'''simple docstring'''
return " -> ".join([str(_UpperCAmelCase) for node in self])
def _lowerCAmelCase ( __snake_case : SortedLinkedList , __snake_case : SortedLinkedList ) -> SortedLinkedList:
return SortedLinkedList(list(__snake_case ) + list(__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : List[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 8 |
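# The merge above simply re-sorts the concatenation of both inputs on
# construction; plain-list equivalent of merge_lists(test_data_odd, test_data_even):
merged = sorted([3, 9, -11, 0, 7, 5, 1, -1] + [4, 6, 2, 0, 8, 10, 3, -2])
assert merged[0] == -11 and merged[-1] == 10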
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( __snake_case : str , __snake_case : str , **__snake_case : List[Any] ) -> Any:
__A : Optional[Any] = AutoConfig.from_pretrained(__snake_case , **__snake_case )
__A : int = AutoModelForSeqaSeqLM.from_config(__snake_case )
model.save_pretrained(__snake_case )
AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> int:
if len(__snake_case ) != len(__snake_case ):
raise ValueError('String lengths must match!' )
__A : Optional[Any] = 0
for chara, chara in zip(__snake_case , __snake_case ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
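# Worked example for the Hamming distance above: "karolin" and "kathrin"
# differ at three positions (r/t, o/h, l/r):
assert sum(ca != cb for ca, cb in zip("karolin", "kathrin")) == 3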
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 1 |
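# The final loop above coerces JSON string keys of aggregation_labels back to
# ints after (de)serialization; a minimal illustration:
assert {int(k): v for k, v in {"0": "NONE", "1": "SUM"}.items()} == {0: "NONE", 1: "SUM"}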
'''simple docstring'''
lowercase__ : str = tuple[float, float, float]
lowercase__ : Dict = tuple[float, float, float]
def _lowerCAmelCase ( __snake_case : Pointad , __snake_case : Pointad ) -> Vectorad:
__A : List[Any] = end_pointa[0] - end_pointa[0]
__A : Union[str, Any] = end_pointa[1] - end_pointa[1]
__A : Union[str, Any] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _lowerCAmelCase ( __snake_case : Vectorad , __snake_case : Vectorad ) -> Vectorad:
__A : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i
__A : Union[str, Any] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__A : int = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _lowerCAmelCase ( __snake_case : Vectorad , __snake_case : int ) -> bool:
return tuple(round(__snake_case , __snake_case ) for x in vector ) == (0, 0, 0)
def _lowerCAmelCase ( __snake_case : Pointad , __snake_case : Pointad , __snake_case : Pointad , __snake_case : int = 10 ) -> bool:
__A : int = create_vector(__snake_case , __snake_case )
__A : Union[str, Any] = create_vector(__snake_case , __snake_case )
return is_zero_vector(get_ad_vectors_cross(__snake_case , __snake_case ) , __snake_case ) | 8 |
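# Worked cross product for the helpers above (assuming the masked round() call
# resolves to round(x, accuracy) as in the original): AB = (1, 0, 0) and
# AC = (0, 1, 0) give AB x AC = (0, 0, 1), a non-zero vector, so the three
# points are not collinear.
ab, ac = (1, 0, 0), (0, 1, 0)
cross = (ab[1] * ac[2] - ab[2] * ac[1], (ab[0] * ac[2] - ab[2] * ac[0]) * -1, ab[0] * ac[1] - ab[1] * ac[0])
assert cross == (0, 0, 1)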
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
if img.dtype == np.uinta:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
                __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0)  # hwc -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
            # enforce size divisibility (not implemented for this preprocessor)
if self.size_divisibility > 0:
raise NotImplementedError()
            # compute per-image (y, x) scale factors between raw and resized sizes
__A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
__A ,__A : int = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case ) | 8 | 1 |
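# Shortest-edge resize arithmetic from ResizeShortestEdge.__call__ above:
# scaling a 480x640 image so its short edge becomes 384 yields 384x512.
h, w, size = 480, 640, 384
scale = size * 1.0 / min(h, w)
newh, neww = (size, scale * w) if h < w else (scale * h, size)
assert (int(newh + 0.5), int(neww + 0.5)) == (384, 512)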
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
def __init__( self):
'''simple docstring'''
__A : Union[str, Any] = {}
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
print(self.vertex)
for i in self.vertex:
print(_UpperCAmelCase , ' -> ' , ' -> '.join([str(_UpperCAmelCase) for j in self.vertex[i]]))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_UpperCAmelCase)
else:
# else make a new vertex
__A : Tuple = [to_vertex]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = True
print(_UpperCAmelCase , end=' ')
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
lowercase__ : Any = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 8 |
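# Equivalent iterative DFS (explicit stack) for the example graph above; it
# visits vertices in the same order as the recursive version:
def dfs_iterative(adj, start):
    seen, stack, order = set(), [start], []
    while stack:
        v = stack.pop()
        if v in seen:
            continue
        seen.add(v)
        order.append(v)
        stack.extend(reversed(adj.get(v, [])))
    return order

assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]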
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
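# Brute-force cross-check of the articulation points for the adjacency list
# above: remove each vertex in turn and count connected components among the
# remaining vertices; a vertex is an articulation point iff the count exceeds 1.
graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}

def components_without(adj, removed):
    seen, count = set(), 0
    for start in adj:
        if start == removed or start in seen:
            continue
        count += 1
        stack = [start]
        seen.add(start)
        while stack:
            v = stack.pop()
            for w in adj[v]:
                if w != removed and w not in seen:
                    seen.add(w)
                    stack.append(w)
    return count

assert {v for v in graph if components_without(graph, v) > 1} == {2, 3, 5}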
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 'ylacombe/bark-small'
__A : List[str] = tempfile.mkdtemp()
__A : int = 'en_speaker_1'
__A : List[Any] = 'This is a test string'
__A : Any = 'speaker_embeddings_path.json'
__A : int = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_tokenizer()
__A : Optional[int] = BarkProcessor(tokenizer=_UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
__A : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__A : Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : List[str] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__A : str = 35
__A : Tuple = 2
__A : str = 8
__A : Union[str, Any] = {
'semantic_prompt': np.ones(_UpperCAmelCase),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
__A : int = processor(text=self.input_string , voice_preset=_UpperCAmelCase)
__A : int = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([])).tolist())
# test loading voice preset from npz file
__A : int = os.path.join(self.tmpdirname , 'file.npz')
np.savez(_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[int] = processor(text=self.input_string , voice_preset=_UpperCAmelCase)
__A : Any = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([])).tolist())
# test loading voice preset from the hub
__A : List[Any] = processor(text=self.input_string , voice_preset=self.voice_preset)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_tokenizer()
__A : Optional[int] = BarkProcessor(tokenizer=_UpperCAmelCase)
__A : Dict = processor(text=self.input_string)
__A : str = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist()) | 8 |
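# Shape sketch of the voice preset dictionary the test above builds (the
# seq_len and codebook counts are the test's own values):
import numpy as np
preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
assert preset["coarse_prompt"].shape == (2, 35) and preset["fine_prompt"].shape == (8, 35)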
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd (https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd)
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
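# Illustrative usage sketch (not part of the original file): the converter above is
# driven from the command line; the script name and paths below are placeholders,
# not values taken from this file.
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted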
'''simple docstring'''
def depth_first_search( grid : list[list[int]] , row : int , col : int , visit : set ) -> int:
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
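# Illustrative usage sketch (not part of the original file): counts the simple paths
# from the top-left to the bottom-right corner of a small grid, where 1 marks a
# blocked cell. With the centre blocked, the open cells form a ring, so there are
# exactly two distinct simple paths.
maze = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(maze, 0, 0, set()))  # expected output: 2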
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput ):
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin , ConfigMixin ):
@register_to_config
    def __init__(
        self,
        sample_size = 65536,
        sample_rate = None,
        in_channels = 2,
        out_channels = 2,
        extra_in_channels = 0,
        time_embedding_type = "fourier",
        flip_sin_to_cos = True,
        use_timestep_embedding = False,
        freq_shift = 0.0,
        down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type = "UNetMidBlock1D",
        out_block_type = None,
        block_out_channels = (32, 32, 64),
        act_fn = None,
        norm_num_groups = 8,
        layers_per_block = 1,
        downsample_each_block = False,
    ):
        '''simple docstring'''
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ):
        '''simple docstring'''
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample) | 8 | 1 |
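# Illustrative usage sketch (not part of the original file): a dummy denoising step
# through the 1D UNet defined above, in a dance-diffusion-style configuration. The
# extra_in_channels=16 value is an assumption chosen so the fourier time embedding
# width lines up with the first block's input channels.
#
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2, extra_in_channels=16)
#   noisy_sample = torch.randn(1, 2, 65536)  # (batch, channels, sample_size)
#   denoised = model(noisy_sample, timestep=10).sample  # same shape as the input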
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    def _parse_labels( self , labels):
        '''simple docstring'''
        if isinstance(labels , str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template):
        '''simple docstring'''
        if len(sequences) == 0 or len(labels) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template))
        if isinstance(sequences , str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs):
        '''simple docstring'''
        self._args_parser = args_parser
        super().__init__(*args , **kwargs)
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')
    @property
    def entailment_id( self):
        '''simple docstring'''
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail'):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`')
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs):
        '''simple docstring'''
        if kwargs.get('multi_class' , None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.')
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        '''simple docstring'''
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(F'Unable to understand extra arguments {args}')
        return super().__call__(sequences , **kwargs)
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}."):
        '''simple docstring'''
        sequence_pairs, sequences = self._args_parser(inputs , candidate_labels , hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward( self , inputs):
        '''simple docstring'''
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False):
        '''simple docstring'''
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1 , keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1 , keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        } | 8 |
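# Illustrative usage sketch (not part of the original file): the pipeline above is
# normally constructed through `transformers.pipeline`; the model id and labels below
# are arbitrary examples, not values taken from this file.
#
#   from transformers import pipeline
#   classifier = pipeline('zero-shot-classification', model='facebook/bart-large-mnli')
#   result = classifier('one day I will see the world',
#                       candidate_labels=['travel', 'cooking', 'dancing'])
#   print(result['labels'][0], result['scores'][0])  # highest-scoring label first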
'''simple docstring'''
def hamming_distance( string1 : str , string2 : str ) -> int:
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
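# Illustrative usage sketch (not part of the original file): the distance counts the
# positions at which two equal-length strings differ.
print(hamming_distance('karolin', 'kathrin'))  # 3 (r/t, o/h, l/r differ)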
'''simple docstring'''
from math import pi
def arc_length( angle : int , radius : int ) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10)) | 8 |
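# Worked example (not part of the original file): a 90 degree arc of a circle of
# radius 10 spans a quarter of the circumference, 2 * pi * 10 / 4 ~ 15.71, and a
# 180 degree arc of the unit circle is half its circumference, i.e. pi.
print(arc_length(180, 1))  # ~ 3.141593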
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo : str , pytorch_dump_folder_path : str ) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ) -> set[int]:
    queue : list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""") | 8 |
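    # Illustrative follow-up check (not part of the original file): whatever set the
    # greedy heuristic returns, every edge (u, v) must have at least one endpoint in it.
    cover = greedy_min_vertex_cover(graph)
    assert all(u in cover or v in cover for u in graph for v in graph[u])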
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict( self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig):
                d[k] = v.to_dict()
        return d | 8 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ):
    return compare_versions(torch_version , operation , version ) | 8 |
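# Illustrative usage sketch (not part of the original file): comparing the installed
# torch version against a requirement string; the version literal is an arbitrary example.
#
#   if is_torch_version('>=', '1.12.0'):
#       print('torch is at least 1.12.0')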
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig ):
    model_type = 'lxmert'
    attribute_map = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs) | 8 | 1 |
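# Illustrative usage sketch (not part of the original file): instantiating the config
# with defaults and overriding a single field; the override value is an arbitrary example.
#
#   config = LxmertConfig(visual_feat_dim=1024)
#   print(config.hidden_size, config.visual_feat_dim)  # 768 1024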
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowercase__ : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CodeGenTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
if kwargs.pop('add_bos_token' , _UpperCAmelCase):
            model_id = kwargs.pop('name_or_path' , '')
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.')
__A : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase) != add_prefix_space:
__A : Dict = getattr(_UpperCAmelCase , pre_tok_state.pop('type'))
__A : Optional[int] = add_prefix_space
__A : Union[str, Any] = pre_tok_class(**_UpperCAmelCase)
__A : Tuple = add_prefix_space
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : Dict = kwargs.get('is_split_into_words' , _UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : str = kwargs.get('is_split_into_words' , _UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[str] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase)
return tuple(_UpperCAmelCase)
    def decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , truncate_before_pattern = None , **kwargs , ):
        '''simple docstring'''
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern)
        return decoded_text
    def truncate( self , completion , truncate_before_pattern):
        '''simple docstring'''
        def find_re(string , pattern , start_pos):
            m = pattern.search(string , start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer('^print' , completion , re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('^def' , completion , re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion | 8 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number( number : int ) -> int:
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
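# Worked example (not part of the original file): 12 = 4 + 4 + 4 needs three squares,
# while 13 = 4 + 9 needs only two.
print(minimum_squares_to_represent_a_number(12))  # 3
print(minimum_squares_to_represent_a_number(13))  # 2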
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
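# Illustrative usage sketch (not part of the original file): three items and capacity
# 50; the greedy fractional solution takes items 0 and 1 whole and two thirds of item 2.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, 50)
print(max_value)  # 240.0
print(fractions)  # [1, 1, 0.666...]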
def greatest_common_divisor(a: int, b: int) -> int:
    """simple docstring"""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """simple docstring"""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main() -> None:
    """simple docstring"""
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}')
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')
if __name__ == "__main__":
main()
| 1 |
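# Illustrative usage sketch (not part of the original file): both helpers agree on a
# sample pair.
print(greatest_common_divisor(24, 40))  # 8
print(gcd_by_iterative(24, 40))  # 8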
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__( self , size):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size)]
        self.flag = [0 for i in range(0 , 4 * size)]  # flag for lazy update
    def left( self , idx):
        '''simple docstring'''
        return idx * 2
    def right( self , idx):
        '''simple docstring'''
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx) , left_element , mid , a)
            self.build(self.right(idx) , mid + 1 , right_element , a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)] , self.segment_tree[self.right(idx)])
    def update( self , idx , left_element , right_element , a , b , val):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx) , left_element , mid , a , b , val)
        self.update(self.right(idx) , mid + 1 , right_element , a , b , val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)] , self.segment_tree[self.right(idx)])
        return True
    def query( self , idx , left_element , right_element , a , b):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx) , left_element , mid , a , b)
        q2 = self.query(self.right(idx) , mid + 1 , right_element , a , b)
        return max(q1 , q2)
    def __str__( self):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 0 |
def valid_coloring( neighbours : list[int] , colored_vertices : list[int] , color : int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph : list[list[int]] , max_colors : int , colored_vertices : list[int] , index : int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph : list[list[int]] , max_colors : int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
| 2 |
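# Illustrative usage sketch (not part of the original file): a triangle needs three
# colors, so a valid assignment exists with max_colors=3 but not with 2.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(triangle, 3))  # e.g. [0, 1, 2]
print(color(triangle, 2))  # [] (no valid 2-coloring)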
'''simple docstring'''
def sum_of_series( first_term : int , common_diff : int , num_of_terms : int ) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
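# Worked example (not part of the original file): five terms of 2, 5, 8, 11, 14 sum to
# (5 / 2) * (2 * 2 + 4 * 3) = 40.
print(sum_of_series(2, 3, 5))  # 40.0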
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {'a': 1, 'b': 2}
UpperCamelCase = {'a': [1, 2], 'b': [3, 4]}
UpperCamelCase = {'a': {'1': 1}, 'b': 2}
UpperCamelCase = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {'a': 2, 'b': 3}
UpperCamelCase = {'a': [2, 3], 'b': [4, 5]}
UpperCamelCase = {'a': {'1': 2}, 'b': 3}
UpperCamelCase = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ ) , A_ )
UpperCamelCase = 2
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ )
UpperCamelCase = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
UpperCamelCase = {'a': 2, 'b': 0, 'c': 2}
UpperCamelCase = {
'a': np.eye(2 ).astype(A_ ),
'b': np.zeros(3 ).astype(A_ ),
'c': np.ones(2 ).astype(A_ ),
}
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A_ ): # can't pickle a local lambda
map_nested(lambda A_ : x + 1 , A_ , num_proc=A_ )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = {'a': 1, 'b': 2}
UpperCamelCase = {'a': 3, 'b': 4}
UpperCamelCase = {'a': 5, 'b': 6}
UpperCamelCase = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = """bar"""
UpperCamelCase = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(A_ , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch('datasets.utils.py_utils._single_map_nested') as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool') as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@require_tf
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase = layers.Dense(2 )
def gen_random_output():
UpperCamelCase = tf.random.uniform((1, 3) )
return model(A_ ).numpy()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=A_ ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
import torch
def gen_random_output():
UpperCamelCase = torch.nn.Linear(3 , 2 )
UpperCamelCase = torch.rand(1 , 3 )
return model(A_ ).detach().numpy()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_pytorch=A_ ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(A_ , A_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1 , y='foobar')
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input) == expected_output
    input = {'a': {'b': A(x=10 , y='foo')}, 'c': [A(x=20 , y='bar')]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10 , y='foo')])
def _split_text(text: str):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10))
        assert out.count('hello') == 10
        assert out.count('there') == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _2seconds_generator_of_2items_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}]):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count('a') == 2
        assert out.count('b') == 2
        assert len(out) == 4
| 3 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : str = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
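# Editor's note — with the _LazyModule indirection above, the heavy framework
# submodules are only imported on first attribute access. A minimal sketch of
# the resulting usage (assumes torch and transformers are installed):
#
#     from transformers.models.deberta import DebertaConfig, DebertaModel
#     model = DebertaModel(DebertaConfig())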
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
    def noop(x : List[Any] ):
        return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
    # This inner function sorts imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
        __A : Any = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __A : Optional[int] = sort_objects(__snake_case , key=lambda x : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        __A : Dict = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
        __A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
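# Editor's sketch — the ordering rule the object sorter above implements:
# constants first, then classes, then functions, with each bucket sorted
# alphabetically while ignoring underscores. Self-contained; the names below
# are made up for illustration.
def _bucket(name):
    if name.isupper():
        return 0  # constants
    if name[0].isupper():
        return 1  # classes
    return 2  # functions and private helpers

_names = ["load_tool", "DebertaModel", "LOGGER", "_private_fn", "AutoConfig"]
_ordered = sorted(_names, key=lambda n: (_bucket(n), n.lower().replace("_", "")))
assert _ordered == ["LOGGER", "AutoConfig", "DebertaModel", "load_tool", "_private_fn"]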
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=3 , _lowercase=4 , _lowercase=[10, 20, 30, 40] , _lowercase=[2, 2, 3, 2] , _lowercase=True , _lowercase=True , _lowercase=37 , _lowercase="gelu" , _lowercase=10 , _lowercase=0.02 , _lowercase=["stage2", "stage3", "stage4"] , _lowercase=3 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_stages
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = out_features
_lowerCAmelCase = num_labels
_lowerCAmelCase = scope
_lowerCAmelCase = num_stages
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowercase ( self ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowercase , loss_ignore_index=255 , num_labels=self.num_labels , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = UperNetForSemanticSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_lowercase : int = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_lowercase : int = False
_lowercase : Dict = False
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : str = False
_lowercase : Any = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = UperNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
return
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
_lowerCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = _config_zero_init(_lowercase )
_lowerCAmelCase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(config=_lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def A ():
_lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_lowerCAmelCase = Image.open(__lowerCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_lowercase )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
with torch.no_grad():
_lowerCAmelCase = model(**_lowercase )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_lowercase )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
with torch.no_grad():
_lowerCAmelCase = model(**_lowercase )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) )
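# Editor's note — both integration tests above follow the same recipe: run the
# checkpoint under torch.no_grad(), check outputs.logits.shape against
# torch.Size((1, num_labels, 512, 512)), then compare a 3x3 corner slice of the
# logits to hard-coded reference values with torch.allclose(..., atol=1e-4).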
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
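# Worked check (editor's note): for 28 the proper divisors are 1, 2, 4, 7 and 14,
# whose sum is 28, so the function returns True; for 27 they are 1, 3 and 9,
# summing to 13, so it returns False.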
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 0 |
class UpperCamelCase_ :
def __init__( self :Optional[int] , __A :str = "" , __A :bool = False ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {}
# A node will be a leaf if the tree contains its word
SCREAMING_SNAKE_CASE__ = is_leaf
SCREAMING_SNAKE_CASE__ = prefix
def _snake_case ( self :Optional[int] , __A :str ) -> tuple[str, str, str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
for q, w in zip(self.prefix , __A ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
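        # Worked example (editor's note): with prefix "band" and word "banana"
        # the common part is "ban", so this returns ("ban", "d", "ana").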
def _snake_case ( self :Any , __A :list[str] ) -> None:
"""simple docstring"""
for word in words:
self.insert(__A )
def _snake_case ( self :Tuple , __A :str ) -> None:
"""simple docstring"""
if self.prefix == word:
SCREAMING_SNAKE_CASE__ = True
        # Case 2: The node has no edge sharing a first letter with the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
SCREAMING_SNAKE_CASE__ = RadixNode(prefix=__A , is_leaf=__A )
else:
SCREAMING_SNAKE_CASE__ = self.nodes[word[0]]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = incoming_node.match(
__A )
            # Case 3: The node's prefix equals the matching part
            # Solution: We insert the remaining word into the child node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__A )
            # Case 4: The word and the node's prefix only partially match
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
SCREAMING_SNAKE_CASE__ = remaining_prefix
SCREAMING_SNAKE_CASE__ = self.nodes[matching_string[0]]
SCREAMING_SNAKE_CASE__ = RadixNode(__A , __A )
SCREAMING_SNAKE_CASE__ = aux_node
if remaining_word == "":
SCREAMING_SNAKE_CASE__ = True
else:
self.nodes[matching_string[0]].insert(__A )
def _snake_case ( self :Optional[int] , __A :str ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.nodes.get(word[0] , __A )
if not incoming_node:
return False
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = incoming_node.match(
__A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__A )
def _snake_case ( self :Optional[Any] , __A :str ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.nodes.get(word[0] , __A )
if not incoming_node:
return False
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = incoming_node.match(
__A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__A )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
SCREAMING_SNAKE_CASE__ = list(self.nodes.values() )[0]
SCREAMING_SNAKE_CASE__ = merging_node.is_leaf
self.prefix += merging_node.prefix
SCREAMING_SNAKE_CASE__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
SCREAMING_SNAKE_CASE__ = False
# If there is 1 edge, we merge it with its child
else:
SCREAMING_SNAKE_CASE__ = list(incoming_node.nodes.values() )[0]
SCREAMING_SNAKE_CASE__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
SCREAMING_SNAKE_CASE__ = merging_node.nodes
return True
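        # Editor's note — the three delete outcomes above: a childless node has
        # its edge removed (and this node may then merge with a now-only
        # child); a node with several children merely stops being a leaf; and a
        # node with exactly one child is spliced into it by concatenating
        # prefixes.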
def _snake_case ( self :Optional[Any] , __A :int = 0 ) -> None:
"""simple docstring"""
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = """banana bananas bandana band apple all beast""".split()
SCREAMING_SNAKE_CASE__ = RadixNode()
root.insert_many(UpperCamelCase__ )
assert all(root.find(UpperCamelCase__ ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def SCREAMING_SNAKE_CASE__ ( ):
assert test_trie()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = RadixNode()
SCREAMING_SNAKE_CASE__ = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(UpperCamelCase__ )
print("""Words:""" , UpperCamelCase__ )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
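# Editor's sketch — how the PATTERNS table above rewrites a ParlAI weight name,
# shown on a hypothetical encoder key (the script's renamer additionally
# rewrites the layer-norm names afterwards):
_patterns = [["encoder_attention", "encoder_attn"], ["attention", "attn"], ["q_lin", "q_proj"]]
_key = "encoder.attention.q_lin.weight"
for _parlai_name, _hf_name in _patterns:
    _key = _key.replace(_parlai_name, _hf_name)
assert _key == "encoder.attn.q_proj.weight"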
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _snake_case ( _snake_case : SplitDict ) -> Optional[Any]:
'''simple docstring'''
_A = split_dict._to_yaml_list()
    assert len(_A ) == len(_snake_case )
    _A = SplitDict._from_yaml_list(_A )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_A = None
# the split name of split_dict takes over the name of the split info object
_A = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=_snake_case ), SplitInfo(dataset_name='my_dataset' )] )
def _snake_case ( _snake_case : Any ) -> Dict:
'''simple docstring'''
_A = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
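# Editor's sketch (assumes the `datasets` library is installed) — the YAML
# round trip the first test parametrizes, written out for a single split:
#
#     sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#     reloaded = SplitDict._from_yaml_list(sd._to_yaml_list())
#     assert reloaded["train"].num_examples == 42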
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
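        # Worked example (editor's note): for len_A=3 and len_B=2 the product
        # has 3 + 2 - 1 = 4 coefficients, and the next power of two is 4, so
        # both coefficient lists are zero-padded to length 4.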
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
        # A complex root of unity used for the Fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
__A : int = 'A = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A]))
__A : Optional[Any] = 'B = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B]))
__A : str = 'A*B = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
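# Editor's sketch (uses the numpy already imported above) — cross-checking the
# class with numpy's FFT on a tiny product: (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2.
if __name__ == "__main__":

    def _np_poly_mul(a, b):
        # Pad to the product length, multiply pointwise in the frequency
        # domain, then transform back to real coefficients.
        n = len(a) + len(b) - 1
        coeffs = np.fft.ifft(np.fft.fft(a, n) * np.fft.fft(b, n)).real
        return [round(c, 8) for c in coeffs]

    assert _np_poly_mul([1, 2], [3, 4]) == [3.0, 10.0, 8.0]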
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> Union[str, Any]:
A__ = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
A__ = MaskFormerConfig(backbone_config=__UpperCamelCase )
A__ = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
A__ = 847
A__ = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
A__ = 150
A__ = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
A__ = 171
A__ = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
A__ = 133
A__ = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
A__ = 19
A__ = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
A__ = 65
A__ = 'mapillary-vistas-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase ) -> Dict:
A__ = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( __UpperCamelCase , __UpperCamelCase ) -> str:
A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:dim, :]
A__ = in_proj_bias[: dim]
A__ = in_proj_weight[
dim : dim * 2, :
]
A__ = in_proj_bias[
dim : dim * 2
]
A__ = in_proj_weight[
-dim :, :
]
A__ = in_proj_bias[-dim :]
# fmt: on
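# Editor's note — the slicing above: the fused in_proj_weight has shape
# (3 * dim, dim); rows [:dim] become the query projection, rows [dim : 2 * dim]
# the key projection, and rows [-dim:] the value projection (same for the bias).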
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
# fmt: off
A__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
A__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: hidden_size, :]
A__ = in_proj_bias[:config.hidden_size]
A__ = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ = in_proj_bias[hidden_size : hidden_size * 2]
A__ = in_proj_weight[-hidden_size :, :]
A__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
A__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: hidden_size, :]
A__ = in_proj_bias[:config.hidden_size]
A__ = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ = in_proj_bias[hidden_size : hidden_size * 2]
A__ = in_proj_weight[-hidden_size :, :]
A__ = in_proj_bias[-hidden_size :]
# fmt: on
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[Any]:
A__ = get_maskformer_config(__UpperCamelCase )
# load original state_dict
with open(__UpperCamelCase , 'rb' ) as f:
A__ = pickle.load(__UpperCamelCase )
A__ = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
A__ = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_swin_q_k_v(__UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCamelCase , __UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
A__ = torch.from_numpy(__UpperCamelCase )
# load 🤗 model
A__ = MaskFormerForInstanceSegmentation(__UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(__UpperCamelCase , param.shape )
A__ , A__ = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__UpperCamelCase ) == 0, f'''Unexpected keys: {unexpected_keys}'''
# verify results
A__ = prepare_img()
if "vistas" in model_name:
A__ = 65
elif "cityscapes" in model_name:
A__ = 65_535
else:
A__ = 255
A__ = True if 'ade' in model_name else False
A__ = MaskFormerImageProcessor(ignore_index=__UpperCamelCase , reduce_labels=__UpperCamelCase )
A__ = image_processor(__UpperCamelCase , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
A__ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
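        # Worked example (editor's note, using the defaults above): image_size
        # [30, 30] with patch_size 2 gives num_patches = (30 // 2) * (30 // 2)
        # = 225, so expected_seq_len = 225 + 1 ([CLS]) + 10 detection tokens = 236.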
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
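# --- Editor's illustrative note (hedged; key names follow the upstream YOLOS API, the
# tensors are made-up): the tester above builds detection targets in the format
# YolosForObjectDetection expects -- one dict per image, e.g.
#   labels = [{"class_labels": torch.tensor([1, 7]),
#              "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2], [0.3, 0.4, 0.1, 0.1]])}]
#   outputs = model(pixel_values, labels=labels)  # populates outputs.loss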
@require_torch
class YolosModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
        self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
 | 8 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = is_training
_UpperCamelCase = use_auxiliary_loss
_UpperCamelCase = num_queries
_UpperCamelCase = num_channels
_UpperCamelCase = min_size
_UpperCamelCase = max_size
_UpperCamelCase = num_labels
_UpperCamelCase = hidden_dim
_UpperCamelCase = hidden_dim
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
_UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
_UpperCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
).float()
_UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
_UpperCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCamelCase = self.num_queries
_UpperCamelCase = self.num_labels
_UpperCamelCase = [1, 1, 1, 1]
_UpperCamelCase = self.num_channels
_UpperCamelCase = 64
_UpperCamelCase = 128
_UpperCamelCase = self.hidden_dim
_UpperCamelCase = self.hidden_dim
_UpperCamelCase = self.hidden_dim
return config
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(transformer_decoder_hidden_states ) , config.decoder_layers )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[Any] , _A : Dict , _A : int , _A : List[Any]=False ):
with torch.no_grad():
_UpperCamelCase = MaskaFormerModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(pixel_values=_A , pixel_mask=_A )
_UpperCamelCase = model(_A , output_hidden_states=_A )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : Any , _A : Any , _A : Any , _A : Tuple ):
_UpperCamelCase = MaskaFormerForUniversalSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(_A : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCamelCase = model(pixel_values=_A , pixel_mask=_A )
_UpperCamelCase = model(_A )
comm_check_on_output(_A )
_UpperCamelCase = model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = MaskaFormerModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def UpperCamelCase_ ( self : str ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
@slow
def UpperCamelCase_ ( self : int ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCamelCase = MaskaFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = (self.model_tester.min_size,) * 2
_UpperCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_A ),
'''class_labels''': torch.zeros(2 , 10 , device=_A ).long(),
}
_UpperCamelCase = self.model_tester.get_config()
_UpperCamelCase = MaskaFormerForUniversalSegmentation(_A ).to(_A )
_UpperCamelCase = model(**_A )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_A , **_A , output_hidden_states=_A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A ).to(_A )
_UpperCamelCase = model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase_ ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = model_class(_A )
model.to(_A )
model.train()
_UpperCamelCase = model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.all_model_classes[1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = model_class(_A ).to(_A )
model.train()
_UpperCamelCase = model(_A , mask_labels=_A , class_labels=_A )
_UpperCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase = 1E-4
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Any ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_A )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(_A , return_tensors='''pt''' ).to(_A )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 384, 384) )
with torch.no_grad():
_UpperCamelCase = model(**_A )
_UpperCamelCase = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
_UpperCamelCase = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
_UpperCamelCase = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(_A , return_tensors='''pt''' ).to(_A )
_UpperCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 384, 384) )
with torch.no_grad():
_UpperCamelCase = model(**_A )
# masks_queries_logits
_UpperCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCamelCase = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_UpperCamelCase = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
_UpperCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCamelCase = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_A ).eval()
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_UpperCamelCase = inputs['''pixel_values'''].to(_A )
_UpperCamelCase = [el.to(_A ) for el in inputs['''mask_labels''']]
_UpperCamelCase = [el.to(_A ) for el in inputs['''class_labels''']]
with torch.no_grad():
_UpperCamelCase = model(**_A )
self.assertTrue(outputs.loss is not None )
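# --- Editor's illustrative sketch (hedged; uses this file's obfuscated class names,
# which map to Mask2Former* upstream, and the checkpoint from the integration tests):
# processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
# model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# segmentation = processor.post_process_instance_segmentation(outputs)[0]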
| 10 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class CamembertTokenizerFast (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        '''simple docstring'''
        # mask_token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
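    # Editor's note (illustrative): the method above yields <s> A </s> for a single
    # sequence and <s> A </s></s> B </s> for a pair, CamemBERT's RoBERTa-style format.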
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
 | 8 | 0 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of num must be multiplied together until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of num must be summed until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
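    # Editor's hedged sanity checks (added): 39 -> 27 -> 14 -> 4 takes three
    # multiplicative steps; 9876 -> 30 -> 3 takes two additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(9876) == 2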
| 11 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
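# --- Editor's illustrative note (hedged; assumes the default huggingface_hub cache
# layout exercised by the tests below):
#   models--<org>--<name>/
#     blobs/      content-addressed file payloads
#     refs/       branch name -> commit hash (e.g. refs/main)
#     snapshots/  one directory per commit, symlinking into blobs/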
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
            self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
 | 8 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline ( ChunkPipeline ):
def __init__( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , """vision""")
self.check_model_type(SCREAMING_SNAKE_CASE_)
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
if "text_queries" in kwargs:
lowercase__ : Any = kwargs.pop("""text_queries""")
if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)):
lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels}
else:
lowercase__ : int = image
lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
return results
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {}
if "threshold" in kwargs:
lowercase__ : List[Any] = kwargs["""threshold"""]
if "top_k" in kwargs:
lowercase__ : int = kwargs["""top_k"""]
return {}, {}, postprocess_params
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = load_image(inputs["""image"""])
lowercase__ : Any = inputs["""candidate_labels"""]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
lowercase__ : List[str] = candidate_labels.split(""",""")
lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework)
yield {
"is_last": i == len(SCREAMING_SNAKE_CASE_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = model_inputs.pop("""target_size""")
lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""")
lowercase__ : Dict = model_inputs.pop("""is_last""")
lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for model_output in model_outputs:
lowercase__ : Optional[int] = model_output["""candidate_label"""]
lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.image_processor.post_process_object_detection(
outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0]
for index in outputs["scores"].nonzero():
lowercase__ : Optional[Any] = outputs["""scores"""][index].item()
lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0])
lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box}
results.append(SCREAMING_SNAKE_CASE_)
lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_)
if top_k:
lowercase__ : Any = results[:top_k]
return results
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""")
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist()
lowercase__ : Optional[int] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
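# --- Editor's illustrative sketch (hedged; the checkpoint is an assumption -- any
# zero-shot object detection model such as OWL-ViT should work):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#          candidate_labels=["cat", "remote control"])
# # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]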
| 12 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
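# Editor's note (illustrative; the script name is hypothetical): fire turns the function
# into a CLI, e.g. `python save_randomly_initialized.py t5-small ./t5-small-random --d_model=64`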
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
 | 8 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
A__ : int = 8
def decimal_to_bits( x , bits : int = BITS ):
__lowerCamelCase : List[Any] = x.device
__lowerCamelCase : Union[str, Any] = (x * 2_55).int().clamp(0 , 2_55 )
__lowerCamelCase : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase_ )
__lowerCamelCase : List[str] = rearrange(UpperCAmelCase_ , 'd -> d 1 1' )
__lowerCamelCase : Optional[int] = rearrange(UpperCAmelCase_ , 'b c h w -> b c 1 h w' )
__lowerCamelCase : int = ((x & mask) != 0).float()
__lowerCamelCase : List[Any] = rearrange(UpperCAmelCase_ , 'b c d h w -> b (c d) h w' )
__lowerCamelCase : str = bits * 2 - 1
return bits
def bits_to_decimal( x , bits : int = BITS ):
__lowerCamelCase : int = x.device
__lowerCamelCase : Tuple = (x > 0).int()
__lowerCamelCase : Optional[int] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCAmelCase_ , dtype=torch.intaa )
__lowerCamelCase : Tuple = rearrange(UpperCAmelCase_ , 'd -> d 1 1' )
__lowerCamelCase : List[Any] = rearrange(UpperCAmelCase_ , 'b (c d) h w -> b c d h w' , d=8 )
__lowerCamelCase : Dict = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 2_55).clamp(0.0 , 1.0 )
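# --- Editor's hedged round-trip note (added): for x in [0, 1], encoding then decoding
# is lossless up to 8-bit quantization, i.e.
#   bits_to_decimal(decimal_to_bits(x)) == (x * 255).int().float() / 255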
def ddim_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCamelCase : Tuple = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCamelCase : Any = self.alphas_cumprod[timestep]
__lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCamelCase : Dict = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCamelCase : Any = self.bit_scale
if self.config.clip_sample:
__lowerCamelCase : Tuple = torch.clamp(UpperCAmelCase_ , -scale , UpperCAmelCase_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCamelCase : int = self._get_variance(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : Any = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCamelCase : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : List[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : List[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCamelCase : int = model_output.device if torch.is_tensor(UpperCAmelCase_ ) else 'cpu'
__lowerCamelCase : List[Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase_ ).to(UpperCAmelCase_ )
__lowerCamelCase : int = self._get_variance(UpperCAmelCase_ , UpperCAmelCase_ ) ** 0.5 * eta * noise
__lowerCamelCase : Tuple = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ )
def ddpm_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__lowerCamelCase : Tuple = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCamelCase , __lowerCamelCase : int = torch.split(UpperCAmelCase_ , sample.shape[1] , dim=1 )
else:
__lowerCamelCase : Union[str, Any] = None
# 1. compute alphas, betas
__lowerCamelCase : Tuple = self.alphas_cumprod[t]
__lowerCamelCase : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCamelCase : Optional[Any] = 1 - alpha_prod_t
__lowerCamelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCamelCase : Union[str, Any] = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
__lowerCamelCase : List[Any] = self.bit_scale
if self.config.clip_sample:
__lowerCamelCase : List[Any] = torch.clamp(UpperCAmelCase_ , -scale , UpperCAmelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCamelCase : Dict = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCamelCase : Any = 0
if t > 0:
__lowerCamelCase : str = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCAmelCase_ ).to(model_output.device )
__lowerCamelCase : str = (self._get_variance(UpperCAmelCase_ , predicted_variance=UpperCAmelCase_ ) ** 0.5) * noise
__lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ )
class BitDiffusion (DiffusionPipeline ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , ) -> Dict:
super().__init__()
__lowerCamelCase : Tuple = bit_scale
__lowerCamelCase : str = (
ddim_bit_scheduler_step if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ) -> Union[Tuple, ImagePipelineOutput]:
__lowerCamelCase : Optional[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Optional[Any] = decimal_to_bits(SCREAMING_SNAKE_CASE_ ) * self.bit_scale
__lowerCamelCase : Optional[int] = latents.to(self.device )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowerCamelCase : List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
__lowerCamelCase : Dict = bits_to_decimal(SCREAMING_SNAKE_CASE_ )
if output_type == "pil":
__lowerCamelCase : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
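# --- Editor's illustrative sketch (hedged; the checkpoint path is hypothetical and a
# trained bit-diffusion UNet is assumed; keyword names mirror the defaults above):
# unet = UNetaDConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical
# pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
# images = pipe(height=256, width=256, num_inference_steps=50).images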
| 13 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
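# --- Editor's illustrative sketch (hedged; values are illustrative, not a released
# checkpoint's) -- the config class below combines cell selection with aggregation:
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True,
#                        answer_loss_cutoff=0.664694, cell_selection_preference=0.207951)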
class TapasConfig (PretrainedConfig ):
    model_type = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
            __A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()}
 | 8 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a__ = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
a__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases ,key=lambda x : version.Version(x ) )
def init_hf_modules():
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE ,exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name: Union[str, os.PathLike] ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path ,exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file ):
    """simple docstring"""
    with open(module_file ,'''r''' ,encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall('''^\s*import\s+\.(\S+)\s*$''' ,content ,flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' ,content ,flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files( module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename ):
    """simple docstring"""
    with open(filename ,'''r''' ,encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall('''^\s*import\s+(\S+)\s*$''' ,content ,flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall('''^\s*from\s+(\S+)\s+import''' ,content ,flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F"""{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`""" )
    return get_relative_imports(filename )
def get_class_in_module( class_name ,module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep ,'''.''' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module ,class_name )
def find_pipeline_class( loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module ,inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls ,DiffusionPipeline )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    F""" {loaded_module}.""" )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path: Union[str, os.PathLike] ,module_file: str ,cache_dir: Optional[Union[str, os.PathLike]] = None ,force_download: bool = False ,resume_download: bool = False ,proxies: Optional[Dict[str, str]] = None ,use_auth_token: Optional[Union[bool, str]] = None ,revision: Optional[str] = None ,local_files_only: bool = False ,):
"""simple docstring"""
_a : List[Any] = str(__a )
_a : Tuple = os.path.join(__a ,__a )
if os.path.isfile(__a ):
_a : List[Any] = module_file_or_url
_a : str = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
_a : int = get_diffusers_versions()
# cut ".dev0"
_a : Optional[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
_a : Union[str, Any] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
_a : Dict = F"""v{revision}"""
elif revision == "main":
_a : Dict = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
_a : Union[str, Any] = COMMUNITY_PIPELINES_URL.format(revision=__a ,pipeline=__a )
try:
_a : int = cached_download(
__a ,cache_dir=__a ,force_download=__a ,proxies=__a ,resume_download=__a ,local_files_only=__a ,use_auth_token=__a ,)
_a : Dict = '''git'''
_a : str = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
_a : Optional[Any] = hf_hub_download(
__a ,__a ,cache_dir=__a ,force_download=__a ,proxies=__a ,resume_download=__a ,local_files_only=__a ,use_auth_token=__a ,)
_a : Union[str, Any] = os.path.join('''local''' ,'''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
_a : Dict = check_imports(__a )
# Now we move the module inside our cached dynamic modules.
_a : int = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__a )
_a : Tuple = Path(__a ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__a ,submodule_path / module_file )
for module_needed in modules_needed:
_a : Any = F"""{module_needed}.py"""
shutil.copy(os.path.join(__a ,__a ) ,submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__a ,__a ):
_a : List[str] = use_auth_token
elif use_auth_token is True:
_a : Optional[int] = HfFolder.get_token()
else:
_a : str = None
_a : List[Any] = model_info(__a ,revision=__a ,token=__a ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_a : List[str] = submodule_path / commit_hash
_a : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__a )
if not (submodule_path / module_file).exists():
shutil.copy(__a ,submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__a ,F"""{module_needed}.py""" ,cache_dir=__a ,force_download=__a ,resume_download=__a ,proxies=__a ,use_auth_token=__a ,revision=__a ,local_files_only=__a ,)
return os.path.join(__a ,__a )
def get_class_from_dynamic_module( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , class_name : Optional[str] = None , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('.py' , '' ) )
| 14 |
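A small runnable sketch, added for illustration, of the cache path the loader above derives for a Hub repo id; the repo id is made up, nothing is downloaded, and a POSIX path separator is assumed in the comment.
import os

DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"  # mirrors the constant used above
pretrained_model_name_or_path = "some-user/custom-pipeline"  # hypothetical repo id
# A repo id containing "/" is flattened into a "local/<user>--<repo>" submodule:
submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
print(DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule)
# diffusers_modules/local/some-user--custom-pipeline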
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__( self , short_edge_length , max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh , neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__( self , cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__( self , images , single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size : Tuple[int, int] ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h ) | 8 | 0 |
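A quick, self-contained check of the two box helpers above on made-up coordinates:
import torch

boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])
_clip_box(boxes, (80, 100))   # in place: x clamped to [0, 100], y to [0, 80]
print(boxes)                  # tensor([[  0.,  10., 100.,  80.]])
scale_yx = torch.tensor([[0.5, 0.25]])
print(_scale_box(boxes, scale_yx))  # x coords * 0.25, y coords * 0.5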
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( num_picked : int = 20 ) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
| 15 |
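A Monte Carlo sanity check of the closed form above (the trial count is arbitrary); the simulated mean number of distinct colours in a 20-ball draw should land near 6.818741802:
import random

def simulate(num_picked: int = 20, trials: int = 20_000) -> float:
    # 7 colours, 10 balls of each
    urn = [colour for colour in range(7) for _ in range(10)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(urn, num_picked)))
    return seen / trials

print(simulate())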
'''simple docstring'''
def compute_ap( l ) -> None:  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 0 |
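For the graph above this prints the articulation points 2, 3 and 5. A second, easily-verified input: in the path 0-1-2 only the middle vertex is an articulation point.
compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints only 1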
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : str = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 ,
                  depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 ,
                  mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 ,
                  attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" ,
                  use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 ,
                  upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" ,
                  **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler | 16 |
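With the constructor restored, the config can be smoke-tested without any checkpoint, assuming transformers is installed (the class keeps the placeholder name _SCREAMING_SNAKE_CASE in this file; upstream it is Swin2SRConfig):
config = _SCREAMING_SNAKE_CASE(upscale=4)
print(config.num_layers, config.upscale)  # 6 4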
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_model , key , value , full_name , weight_type ):
    hf_pointer = hf_model
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 0 |
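The per-layer key translation above, replayed by hand on one illustrative fairseq key:
name = "encoder.layers.3.self_attn.k_proj.weight"
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"
# the layer index sits just before the matched key in the dotted path
layer_index = name.split(key)[0].split(".")[-2]
print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj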
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal( decimal : float ) -> str:
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
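Spot checks for the converter above (these mirror the kind of doctest cases the upstream version carries):
assert decimal_to_hexadecimal(5) == "0x5"
assert decimal_to_hexadecimal(15) == "0xf"
assert decimal_to_hexadecimal(37) == "0x25"
assert decimal_to_hexadecimal(-256) == "-0x100"
assert decimal_to_hexadecimal(5.0) == hex(5)
print("all conversions match")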
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput (BaseOutput ):
    sample: torch.FloatTensor
class SCREAMING_SNAKE_CASE (ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ):
        '''simple docstring'''
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed)
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample) | 8 | 0 |
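A standalone sketch of the timestep broadcast in the forward pass above: a per-batch embedding is lifted to the sample's temporal length (all shapes here are made up):
import torch

sample = torch.randn(2, 14, 16)        # (batch, channels, length)
timestep_embed = torch.randn(2, 32)    # output of the fourier/positional projection
timestep_embed = timestep_embed[..., None].repeat([1, 1, sample.shape[2]])
print(timestep_embed.shape)            # torch.Size([2, 32, 16])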
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "data2vec-audio"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 ,
                  intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 ,
                  attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 ,
                  initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_activation="gelu" ,
                  conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) ,
                  conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 ,
                  conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 ,
                  mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 ,
                  mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False ,
                  use_weighted_layer_sum=False , classifier_proj_size=256 ,
                  tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) ,
                  tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 ,
                  eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 ,
                  num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ) -> int:
        return math.prod(self.conv_stride )
| 18 |
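The property at the end of the config computes the feature encoder's overall downsampling factor; here is that arithmetic in isolation with the default strides:
import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(math.prod(conv_stride))  # 320, i.e. one output frame per 320 input samples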
'''simple docstring'''
def hamming_distance( string_a : str , string_b : str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 0 |
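One worked call of the function above; "karolin" and "kathrin" differ in exactly three positions:
print(hamming_distance("karolin", "kathrin"))  # 3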
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False) -> Optional[Any]:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''next_sentence_label'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
        return inputs_dict
class TFMobileBertModelTester(object ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = embedding_size
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = TFMobileBertModel(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = TFMobileBertForMaskedLM(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFMobileBertForNextSentencePrediction(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFMobileBertForPreTraining(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFMobileBertForSequenceClassification(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFMobileBertForMultipleChoice(config=__a)
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFMobileBertForTokenClassification(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = TFMobileBertForQuestionAnswering(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCamelCase = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]])
_UpperCamelCase = model(__a)[0]
_UpperCamelCase = [1, 6, 3_05_22]
self.assertEqual(output.shape , __a)
_UpperCamelCase = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4)
| 19 |
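A standalone sketch of the random inputs that `ids_tensor` supplies to the tester above; the shape and vocab size are illustrative, and this only needs TensorFlow:
import tensorflow as tf

input_ids = tf.random.uniform((13, 7), minval=0, maxval=99, dtype=tf.int32)
print(input_ids.shape)  # (13, 7)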
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo : str , pytorch_dump_folder_path : str ) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        new_state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 0 |
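The key translation above in isolation, on one illustrative state-dict key:
tensor_key = "roberta.encoder.layer.0.attention.self.query.weight"
if tensor_key.startswith("roberta."):
    tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta."):]
print(tensor_key)  # roberta_prelayernorm.encoder.layer.0.attention.self.query.weight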
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_lowerCAmelCase: Union[str, Any] = logging.get_logger('transformers.models.encodec')
_lowerCAmelCase: List[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
_lowerCAmelCase: str = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
_lowerCAmelCase: Optional[int] = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
_lowerCAmelCase: Dict = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
_lowerCAmelCase: Tuple = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
_lowerCAmelCase: str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_lowerCAmelCase: Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer =getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape =getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape =hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data =value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data =value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data =value
    elif weight_type == "bias":
        hf_pointer.bias.data =value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data =value
    elif weight_type == "running_var":
        hf_pointer.running_var.data =value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data =value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data =value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data =value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data =value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data =value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data =value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data =value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data =value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data =value
    else:
        hf_pointer.data =value
    logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix =key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
    unused_weights =[]
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING =MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING =MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"""{name} was ignored""" )
            continue
        is_used =False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix =key.split('.*.' )
                if prefix in name and suffix in name:
                    key =suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used =True
                if "*" in mapped_key:
                    layer_index =name.split(key )[0].split('.' )[-2]
                    mapped_key =mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type ='weight_g'
                elif "weight_v" in name:
                    weight_type ='weight_v'
                elif "weight_ih_l0" in name:
                    weight_type ='weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type ='weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type ='bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type ='bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type ='weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type ='weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type ='bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type ='bias_hh_l1'
                elif "bias" in name:
                    weight_type ='bias'
                elif "weight" in name:
                    weight_type ='weight'
                elif "running_mean" in name:
                    weight_type ='running_mean'
                elif "running_var" in name:
                    weight_type ='running_var'
                elif "num_batches_tracked" in name:
                    weight_type ='num_batches_tracked'
                else:
                    weight_type =None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config =EncodecConfig.from_pretrained(config_path )
    else:
        config =EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios =[8, 5, 4, 4]
        config.target_bandwidths =[2.2]
        config.num_filters =64
        config.sampling_rate =3_2000
        config.codebook_size =2048
        config.use_causal_conv =False
        config.normalize =False
        config.use_conv_shortcut =False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios =[8, 5, 4, 2]
        config.target_bandwidths =[3.0, 6.0, 12.0, 24.0]
        config.sampling_rate =4_8000
        config.audio_channels =2
        config.use_causal_conv =False
        config.norm_type ='time_group_norm'
        config.normalize =True
        config.chunk_length_s =1.0
        config.overlap =0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""" )
    model =EncodecModel(config )
    feature_extractor =EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint =torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint =original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_lowerCAmelCase: str = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase: str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 20 |
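Exercising the wildcard matching in `should_ignore` above; the key patterns are illustrative:
print(should_ignore("encoder.layers.3.conv.weight", ["encoder.*"]))                   # True (prefix wildcard)
print(should_ignore("quantizer.vq.layers.0._codebook.embed", ["quantizer.*.embed"]))  # True (infix wildcard)
print(should_ignore("decoder.layers.0.conv.bias", ["encoder.*"]))                     # False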
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (TrainingArguments ):
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict( self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig):
                d[k] = v.to_dict()
        return d | 8 | 0 |
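The `to_dict` override above in miniature: nested config objects are flattened to plain dicts so the result stays JSON-serializable (the classes here are stand-ins, not transformers types):
class Cfg:
    def to_dict(self):
        return {"max_length": 128}

d = {"generation_config": Cfg(), "sortish_sampler": False}
for k, v in d.items():
    if isinstance(v, Cfg):
        d[k] = v.to_dict()
print(d)  # {'generation_config': {'max_length': 128}, 'sortish_sampler': False}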
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def A__ ( self :List[str] ):
'''simple docstring'''
super().setUp()
def A__ ( self :List[str] , **__snake_case :str ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :str , **__snake_case :Dict ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :Optional[int] ):
'''simple docstring'''
        input_text ="""永和服装饰品有限公司,今天天气非常好"""
        output_text ="""永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
        return input_text, output_text
def A__ ( self :Dict ):
'''simple docstring'''
        tokenizer =self.get_tokenizer()
        input_text , output_text =self.get_chinese_input_output_texts()
        tokens =tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens =tokens + [tokenizer.unk_token]
        exp_tokens =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def A__ ( self :Any ):
'''simple docstring'''
        tokenizer =self.get_rust_tokenizer()
        input_text , output_text =self.get_chinese_input_output_texts()
        tokens =tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens =tokens + [tokenizer.unk_token]
        exp_tokens =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Dict ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
| 21 |
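A hypothetical end-to-end use of the tokenizer exercised above; this downloads the 'junnyu/roformer_chinese_base' vocab, so it needs network access plus rjieba installed:
from transformers import RoFormerTokenizer

tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
print(tok.tokenize("今天天气非常好"))  # rjieba pre-segmentation followed by WordPiece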
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 0 |
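# A small sketch of how the pieces compose (constructed locally, nothing downloaded):
#   config = LxmertConfig()
#   config.num_hidden_layers  # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}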
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
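# A minimal shape sanity-check for the plain down block (a sketch under assumptions:
# jax/flax installed, channels-last NHWC layout as used by the flax diffusers modules,
# arbitrary sizes; the time embedding width is inferred by the resnet's Dense layer):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   sample, temb = jnp.zeros((1, 16, 16, 32)), jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden_states, output_states = block.apply(params, sample, temb)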
| 22 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to ``number``.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    >>> minimum_squares_to_represent_a_number(21)
    3
    """
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
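# For reference, the recurrence the table fills in (note added for clarity):
#   answers[i] = 1 + min(answers[i - j*j] for 1 <= j <= isqrt(i))
# e.g. 12 = 4 + 4 + 4, so minimum_squares_to_represent_a_number(12) == 3.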
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias' , '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight' , '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias' , '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight' , '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0' , 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior' , 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.' , '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k' , '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.' , 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb' , 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln' , 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln' , '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln' , '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj' , 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out' , 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out' , 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb' , 'embed_tokens')

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/")[-1]}"""):
            r = requests.get(f"""{PREFIX}{file}""" , allow_redirects=True)
            os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=True)
            open(f"""{pytorch_dump_folder_path}/{file.split("/")[-1]}""" , 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}""")['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b' , 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w' , 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' , '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w') as txtfile:
        json.dump(mapping , txtfile)

    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
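# Example invocation (note added for clarity; these are the script's own defaults,
# and the checkpoint files are downloaded on the first run):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted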
| 23 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
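# Why the greedy pass is exact here (note added for clarity): items are taken in
# decreasing value/weight ratio and only the last item ever needs a fractional share,
# which is optimal for the fractional variant; the 0/1 variant needs dynamic programming.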
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    '''
    Compute the resonant frequency of an LC circuit.

    >>> resonant_frequency(inductance=10, capacitance=5)
    ('Resonant frequency', 0.022507907903927652)
    '''
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''' )
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 1_11)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 2_35)
    print(segt)
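# Expected output of the demo above (note added for clarity): 7, 14, 15, 111, then
# the per-index view [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8].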
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MPNetConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs )
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1 , 1 , 10 ) )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
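# The closed form used above (note added for clarity): S_n = n/2 * (2a + (n - 1)d),
# the sum of an arithmetic progression with first term a and common difference d.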
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares on (date, match count) features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: seasonal ARIMA with the match count as an exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="""nm""" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: the data is safe only if most forecasts agree with the actual
    figure within a 0.1 tolerance."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (pass the single held-out value, not the list)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
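# Note added for clarity: the three forecasts act as voters; the last observation is
# flagged "not safe" when most models predict a value that disagrees with it by more
# than the 0.1 tolerance used in data_safety_checker.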
| 26 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # note: the upstream test hard-codes these values rather than using the arguments
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True)
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs) , num_out)
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states) , expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2 , 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'is_longer']

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min = 0,
        frequency_max = 1_4000,
        top_db = None,
        truncation = "fusion",
        padding = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='htk' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters = None):
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='dB' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel , size=[chunk_frames, 64] , mode='bilinear' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"data_truncating {truncation} not implemented" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , **snake_case_ , ):
_A = truncation if truncation is not None else self.truncation
_A = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_A = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
_A = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            _A = [np.asarray(speech , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
_A = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray(snake_case_ )]
# convert to mel spectrogram, truncate and pad if needed.
_A = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
for waveform in raw_speech
]
_A = []
_A = []
for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
if truncation == "fusion" and sum(snake_case_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_A = np.random.randint(0 , len(snake_case_ ) )
_A = True
if isinstance(input_mel[0] , snake_case_ ):
            _A = [np.asarray(feature , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_A = [[longer] for longer in is_longer]
_A = {'input_features': input_mel, 'is_longer': is_longer}
_A = BatchFeature(snake_case_ )
if return_tensors is not None:
_A = input_features.convert_to_tensors(snake_case_ )
return input_features
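# --- Illustrative sketch (not part of the extractor above) ---
# The "repeatpad" branch tiles a short waveform until it fills max_length and
# zero-pads the remainder. A minimal standalone reproduction of that logic,
# assuming a 1-D float32 waveform; the helper name is hypothetical.
import numpy as np

def _repeatpad_demo(waveform, max_length):
    # Tile the waveform floor(max_length / len) times, then pad with zeros.
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode='constant', constant_values=0)

assert _repeatpad_demo(np.ones(3, dtype=np.float32), 8).tolist() == [1, 1, 1, 1, 1, 1, 0, 0]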
| 27 |
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
        return __snake_case
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
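# --- Illustrative sketch (self-contained, for demonstration only) ---
# The sorter above applies a three-bucket rule: constants first, then classes,
# then functions, each bucket ordered with the case- and underscore-insensitive
# key. A minimal reproduction of that rule:
def _sort_objects_demo(objects):
    def norm(s):
        return s.lower().replace('_', '')
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=norm) + sorted(classes, key=norm) + sorted(functions, key=norm)

assert _sort_objects_demo(['load_model', 'CONFIG_NAME', 'AutoModel', 'cached_path']) == [
    'CONFIG_NAME', 'AutoModel', 'cached_path', 'load_model'
]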
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
        __A : List[str] = __snake_case.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
        __A : Any = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : __snake_case[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
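# For example, a single-line entry such as (illustrative key and names)
#     _import_structure["models.bert"] = ["BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "load_tf_weights_in_bert"]
# has its bracket contents reordered (constants, then classes, then functions) to
#     ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel", "load_tf_weights_in_bert"]
# while one-import-per-line blocks are sorted in place, keeping indentation.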
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        __A : Dict = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
        __A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : __snake_case[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 8 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[str] = '''roformer'''
def __init__( self, A=50_000, A=None, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=1_536, A=2, A=0.02, A=1E-12, A=0, A=False, A=True, **A, ):
'''simple docstring'''
super().__init__(pad_token_id=A, **A )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = rotary_value
SCREAMING_SNAKE_CASE : str = use_cache
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE : Optional[int] = {0: 'batch', 1: 'sequence'}
SCREAMING_SNAKE_CASE : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
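# --- Illustrative usage sketch (the exported class names RoFormerConfig and
# RoFormerOnnxConfig are an assumption, following the checkpoint map above) ---
# config = RoFormerConfig(vocab_size=50_000)
# onnx_config = RoFormerOnnxConfig(config)
# list(onnx_config.inputs)  # ["input_ids", "attention_mask", "token_type_ids"]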
| 28 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
    return sum(i for i in range(1 , __snake_case // 2 + 1 ) if __snake_case % i == 0 ) == __snake_case
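# Worked example: 28 is perfect because its proper divisors 1, 2, 4, 7 and 14
# sum back to 28. A standalone check of the same rule:
assert sum(i for i in range(1, 28 // 2 + 1) if 28 % i == 0) == 28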
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = 'SpeechT5FeatureExtractor'
a__: List[str] = 'SpeechT5Tokenizer'
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''audio''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''text''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''text_target''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''audio_target''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''sampling_rate''' , UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
elif text is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
else:
lowerCamelCase_ = None
if audio_target is not None:
lowerCamelCase_ = self.feature_extractor(audio_target=UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_values''']
elif text_target is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''input_values''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''input_ids''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''labels''' , UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowerCamelCase_ = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
elif input_ids is not None:
lowerCamelCase_ = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
else:
lowerCamelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase , UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCamelCase_ = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = self.feature_extractor.feature_size
lowerCamelCase_ = self.feature_extractor.num_mel_bins
lowerCamelCase_ = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = feature_size_hack
lowerCamelCase_ = targets['''input_values''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
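# --- Illustrative usage sketch (the checkpoint name is an assumption; the
# processor routes `text`/`audio` to the tokenizer/feature extractor above) ---
# from transformers import SpeechT5Processor
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="hello world", return_tensors="pt")            # tokenizer path
# targets = processor(audio_target=waveform, sampling_rate=16_000)       # feature-extractor path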
| 29 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
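# Worked example of the renaming above (the key shape is hypothetical):
#   "encoder.layers.0.attention.q_lin.weight"
# first becomes "encoder.layers.0.attn.q_proj.weight" via the PATTERNS table,
# then the encoder branch rewrites ".attn" to ".self_attn", giving
#   "encoder.layers.0.self_attn.q_proj.weight"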
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 8 | 0 |
import colorsys
from PIL import Image # type: ignore
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = x
UpperCAmelCase_ : List[str] = y
for step in range(_lowercase ): # noqa: B007
UpperCAmelCase_ : Union[str, Any] = a * a - b * b + x
UpperCAmelCase_ : List[Any] = 2 * a * b + y
UpperCAmelCase_ : List[Any] = a_new
        # divergence happens for all complex numbers whose absolute value
        # exceeds 2 (equivalently, a * a + b * b > 4)
if a * a + b * b > 4:
break
return step / (max_step - 1)
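# Worked example: for x = y = 0 the orbit stays at the origin, the loop never
# breaks, and the function returns (max_step - 1) / (max_step - 1) == 1.0
# (a point inside the set). For x = 1, y = 0 the orbit escapes after one step
# (a becomes 5, and 5 * 5 > 4), giving 1 / (max_step - 1), e.g. 1 / 49 for
# max_step == 50.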
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) )
def lowerCamelCase__ ( _lowercase = 800 , _lowercase = 600 , _lowercase = -0.6 , _lowercase = 0 , _lowercase = 3.2 , _lowercase = 50 , _lowercase = True , ):
'''simple docstring'''
UpperCAmelCase_ : int = Image.new('''RGB''' , (image_width, image_height) )
UpperCAmelCase_ : List[str] = img.load()
# loop through the image-coordinates
for image_x in range(_lowercase ):
for image_y in range(_lowercase ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase_ : str = figure_width / image_width * image_height
UpperCAmelCase_ : Dict = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase_ : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase_ : int = get_distance(_lowercase , _lowercase , _lowercase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase_ : Dict = get_color_coded_rgb(_lowercase )
else:
UpperCAmelCase_ : int = get_black_and_white_rgb(_lowercase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__a = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
| 30 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
        if len(dft) <= 1:
            return dft[0]
        # Iteratively combine rows in pairs (FFT butterflies), halving the row count each pass
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
            __A : Optional[Any] = [[] for i in range(next_ncol)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
            __A : Optional[int] = [[] for i in range(next_ncol)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
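# --- Illustrative usage sketch of the class above ---
# Multiplying (1 + 2x + 3x^2) by (4 + 5x) gives 4 + 13x + 22x^2 + 15x^3,
# so the product coefficients should come out as [4, 13, 22, 15]
# (up to floating-point rounding from the FFT round trip):
#   poly_product = SCREAMING_SNAKE_CASE([1, 2, 3], [4, 5]).product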
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(_lowerCAmelCase , axis=-1 )
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 2
# create ramp distribution
        SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
SCREAMING_SNAKE_CASE_ = top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE_ = np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
        SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
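    # Worked example for the expected tensor in the test above: with
    # probabilities [0.3, 0.1, 0.1, 0.5] and top_p = 0.8, the sorted cumulative
    # sum reaches 0.5 + 0.3 = 0.8 after two tokens, so only those two survive;
    # in the second row 0.3 + 0.3 + 0.25 = 0.85 >= 0.8 keeps three tokens.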
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 20) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 1) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 4) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = input_ids.copy()
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 10
# no processor list
SCREAMING_SNAKE_CASE_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
SCREAMING_SNAKE_CASE_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = input_ids.copy()
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
SCREAMING_SNAKE_CASE_ = jax.jit(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jax.jit(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 31 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
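        # e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches,
        # so expected_seq_len = 225 + 1 + 10 = 236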
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
        self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
| 8 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=50 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_labels
_UpperCAmelCase = scope
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase( self ):
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
_UpperCAmelCase = BertGenerationEncoder(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
_UpperCAmelCase = True
_UpperCAmelCase = BertGenerationEncoder(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = BertGenerationDecoder(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase , )
_UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['''hidden_states'''][0]
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['''hidden_states'''][0]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ):
_UpperCAmelCase = BertGenerationDecoder(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
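
# Illustrative sketch (added; not part of the test file): the tester above is
# driven from the unittest case below roughly like
#
#   tester = BertGenerationEncoderTester(parent=self)
#   tester.create_and_check_model(*tester.prepare_config_and_inputs())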
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 32 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,) | 8 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
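
# Illustrative sketch (added; not part of the bot): the two inactivity windows
# checked above, evaluated against made-up timestamps.
from datetime import timedelta

_updated_at = dt.utcnow() - timedelta(days=24)
_created_at = dt.utcnow() - timedelta(days=40)
assert (dt.utcnow() - _updated_at).days > 23 and (dt.utcnow() - _created_at).days >= 30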
| 33 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
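
# Illustrative sketch (added; not part of the test file): the hub cache layout
# these tests assert on. The tree below is an assumption for a single cached
# repo, shown only to make the path arithmetic in test_cached_file concrete.
#
#   models--hf-internal-testing--tiny-random-bert/
#       blobs/                          # file contents, addressed by hash
#       refs/main                       # text file holding a commit hash
#       snapshots/<commit>/config.json  # resolved files for that commit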
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) | 8 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
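
# Illustrative sketch (added; not part of the original module): any keyword
# accepted by __init__ above can be overridden at construction time; the
# deliberately tiny sizes below are assumptions for demonstration only.
#
#   tiny = Data2VecTextConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)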
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 8 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized, config-compatible model and tokenizer to `save_dir`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
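
# Illustrative invocation sketch (added; not part of the original script). The
# model name and config override below are assumptions:
#
#   save_randomly_initialized_version("t5-small", "/tmp/t5-rand", d_ff=256)
#
# fire.Fire below exposes the same call on the command line.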
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version) | 8 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Computes (base ** exponent) % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the tower base↑↑height (Project Euler 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
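
# Illustrative sanity check (added; not part of the original solution):
# _modexpt should agree with Python's built-in three-argument pow.
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)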
if __name__ == "__main__":
    print(f"{solution() = }")
| 35 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowercase : Optional[int] = get_logger(__name__)
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Optional[int] = (
os.path.join(SCREAMING_SNAKE_CASE_ ,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case : Union[str, Any] = Extractor
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
snake_case : str = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir ,hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
snake_case : Optional[Any] = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
snake_case : int = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
return output_path
class _A ( snake_case ):
'''simple docstring'''
@classmethod
@abstractmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
...
class _A ( snake_case , snake_case ):
'''simple docstring'''
__lowerCamelCase : List[bytes] = []
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = b"" ):
'''simple docstring'''
if not magic_number:
snake_case : str = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
snake_case : Dict = cls.read_magic_number(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
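
# Illustrative sketch (added; not part of this module): the magic-number sniff
# the class above performs, shown standalone for a gzip header.
def _looks_like_gzip(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(2) == b"\x1f\x8b"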
class _A ( snake_case ):
'''simple docstring'''
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case : Optional[Any] = resolved(os.path.join(SCREAMING_SNAKE_CASE_ ,os.path.dirname(info.name ) ) )
return badpath(info.linkname ,base=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ ,members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : str = [B'''\x1F\x8B''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with gzip.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[str] = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = b"" ):
'''simple docstring'''
if super().is_extractable(SCREAMING_SNAKE_CASE_ ,magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as fp:
snake_case : List[Any] = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case : List[Any] = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
snake_case : str = struct.unpack(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ,"""r""" ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
snake_case : Any = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as ifh, open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[Any] = [B'''\x42\x5A\x68''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with bz2.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(SCREAMING_SNAKE_CASE_ ,exist_ok=SCREAMING_SNAKE_CASE_ )
with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE_ ,"""r""" ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Any = [B'''\x04\x22\x4D\x18''']
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(SCREAMING_SNAKE_CASE_ ,"""rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
class _A :
'''simple docstring'''
__lowerCamelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case_ ( cls ):
'''simple docstring'''
return max(
len(SCREAMING_SNAKE_CASE_ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_ ,magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" ,category=SCREAMING_SNAKE_CASE_ ,)
snake_case : Union[str, Any] = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ): # <Added version="2.4.0"/>
'''simple docstring'''
snake_case : str = cls._get_magic_number_max_length()
snake_case : Optional[Any] = cls._read_magic_number(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ,magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = "deprecated" ,):
'''simple docstring'''
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ) ,exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
snake_case : str = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix(""".lock""" ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ ,ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" ,category=SCREAMING_SNAKE_CASE_ ,)
snake_case : Dict = extractor if extractor != """deprecated""" else extractor_format
else:
snake_case : Optional[Any] = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" ,category=SCREAMING_SNAKE_CASE_ ,)
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
return extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
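
# Illustrative sketch (added; not part of the original module): the
# decompress-to-file pattern the gzip extractor class above uses, exercised on
# a throwaway temporary archive. All paths here are assumptions.
def _gzip_extract_demo() -> bytes:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        archive = os.path.join(tmp, "payload.gz")
        output = os.path.join(tmp, "payload.bin")
        with gzip.open(archive, "wb") as gz:
            gz.write(b"hello archive")
        with gzip.open(archive, "rb") as src, open(output, "wb") as dst:
            shutil.copyfileobj(src, dst)
        with open(output, "rb") as f:
            return f.read()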
| 36 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
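
# Illustrative sketch (added; not part of the original module): the
# shortest-edge rule above on assumed dimensions — a 480x640 image whose short
# side is rescaled to 600 while keeping the aspect ratio.
_scale = 600 / min(480, 640)            # 1.25
_newh, _neww = 600, int(640 * _scale)   # 600 x 800
assert (_newh, _neww) == (600, 800)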
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h) | 8 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts `numbers` in place by exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
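
# Illustrative check (added; not part of the original module):
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]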
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 37 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
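
# Illustrative note (added; not part of the original module): in the path
# graph 0-1-2 the middle vertex is the only articulation point, so
#
#   compute_ap({0: [1], 1: [0, 2], 2: [1]})
#
# would print just "1".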
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n                This includes models such as gpt2, causal variations of bert,\n                causal versions of t5, and more (the full list can be found\n                in the AutoModelForCausalLM documentation here:\n                https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
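
# Illustrative sketch (added; not part of the metric): perplexity is the
# exponential of the mean per-token negative log-likelihood, which is exactly
# what `_compute` evaluates batch by batch below. Toy numbers are assumptions.
import math

_toy_token_nlls = [2.1, 1.7, 3.0]
_toy_ppl = math.exp(sum(_toy_token_nlls) / len(_toy_token_nlls))
assert 9.6 < _toy_ppl < 9.7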
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 38 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
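
# Illustrative sketch (added; not part of the conversion script): how the "*"
# in a MAPPING value is filled in from the fairseq parameter name inside
# `recursively_load_weights` below. The sample key is an assumption.
_sample_name = "encoder.layers.3.fc1.weight"
_layer_index = _sample_name.split("fc1")[0].split(".")[-2]  # -> "3"
_hf_key = "encoder.layers.*.feed_forward.intermediate_dense".replace("*", _layer_index)
assert _hf_key == "encoder.layers.3.feed_forward.intermediate_dense"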
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict, v: str, visited_forward: set, visited_backward: set,
    cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = -1
snake_case_ = set()
snake_case_ = set()
snake_case_ = {source: 0}
snake_case_ = {destination: 0}
snake_case_ = {source: None}
snake_case_ = {destination: None}
snake_case_ = PriorityQueue()
snake_case_ = PriorityQueue()
snake_case_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case_, snake_case_ = queue_forward.get()
visited_forward.add(SCREAMING_SNAKE_CASE__ )
snake_case_, snake_case_ = queue_backward.get()
visited_backward.add(SCREAMING_SNAKE_CASE__ )
snake_case_ = pass_and_relaxation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
snake_case_ = pass_and_relaxation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case_ = shortest_distance
return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
    doctest.testmod()
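    # Spot check (uses the example graphs above): the cheapest E -> F route is
    # E -> G -> F with total weight 3.
    assert bidirectional_dij('E', 'F', graph_fwd, graph_bwd) == 3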
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    sample: torch.FloatTensor


class UNetaDModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
        return UNetaDOutput(sample=sample)
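# A minimal smoke test of the 1D UNet above (illustrative only; the sizes are
# arbitrary small values, not recommended settings):
#
#   model = UNetaDModel(sample_size=256, in_channels=2, out_channels=2)
#   noisy_sample = torch.randn(1, 2, 256)
#   output = model(noisy_sample, timestep=10).sample  # same shape as the input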
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$', line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    # Find and return the source code of `object_name` inside the diffusers repo.
    parts = object_name.split('.')
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += '    '
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return ''.join(code_lines)
_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')
def get_indent(code: str) -> str:
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S', lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n') :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    # Check if the code commented as a copy in `filename` matches the original.
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code)
                if option.strip() == 'all-casing':
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code)
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
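# Typical invocations (illustrative): running `python utils/check_copies.py`
# only reports inconsistencies, while adding `--fix_and_overwrite` rewrites
# the out-of-sync copies in place (also exposed as `make fix-copies`).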
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for chara, charb in zip(string1, string2):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
    doctest.testmod()
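    # Direct spot checks (illustrative values; "karolin" vs "kathrin" is the
    # classic Hamming-distance example, differing in 3 positions):
    assert hamming_distance('python', 'python') == 0
    assert hamming_distance('karolin', 'kathrin') == 3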
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
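    # Example invocation (illustrative script name and flags): launch a
    # training script on 8 TPU cores, forwarding the remaining arguments to
    # the script itself:
    #
    #   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...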
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
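# Example invocation (illustrative script name and output path; the repo id
# matches the help text above):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-hf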
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default='text-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    text_column: str = 'text'
    label_column: str = 'labels'

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
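# A minimal usage sketch (the feature and label names are illustrative):
#
#   features = Features({'text': Value('string'),
#                        'labels': ClassLabel(names=['neg', 'pos'])})
#   task = TextClassification(text_column='text', label_column='labels')
#   task = task.align_with_features(features)
#   task.column_mapping  # {'text': 'text', 'labels': 'labels'}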
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
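# A minimal usage sketch (the output directory and generation settings are
# illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir='./out',
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )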
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
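    # Spot check with the classic example for this algorithm: the longest
    # non-decreasing subsequence is [10, 22, 33, 41, 60, 80].
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]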
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = 'lxmert'
    attribute_map = {}

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
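# A minimal usage sketch: build the default configuration, or a smaller
# variant (the override values are illustrative):
#
#   config = LxmertConfig()
#   small = LxmertConfig(hidden_size=256, l_layers=3, x_layers=2, r_layers=2)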
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , ROBERTA_START_DOCSTRING , )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ' , ROBERTA_START_DOCSTRING , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
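# A minimal usage sketch (illustrative; `input_ids` and `labels` are assumed
# to be tensors prepared elsewhere, and highway exits come from the DeeBERT
# layers of the underlying model):
#
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   loss, logits = model(input_ids, labels=labels)[:2]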
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
    doctest.testmod()
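    # Spot checks: 12 = 4 + 4 + 4 needs three squares, 13 = 4 + 9 needs two.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(13) == 2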
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
    doctest.testmod()
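    # Round-trip spot check against the standard library implementation.
    import base64
    assert base64_encode(b'Hello World!') == base64.b64encode(b'Hello World!')
    assert base64_decode('SGVsbG8gV29ybGQh') == b'Hello World!'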
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
    doctest.testmod()
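    # Spot check (a classic instance): with capacity 50, take items 0 and 1
    # fully and 2/3 of item 2 for a total value of 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]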
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    '''simple docstring'''
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
    doctest.testmod()
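    # Spot check with textbook values for water (illustrative): a bulk modulus
    # of ~2.15 GPa and a density of ~998 kg/m^3 give roughly 1468 m/s.
    assert round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)) == 1468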
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
    print(segt)
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
    doctest.testmod()
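    # Spot check: 1 + 2 + ... + 10 = 55, matching n/2 * (2a + (n - 1)d).
    assert sum_of_series(1, 1, 10) == 55.0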
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class A :
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.__components )
def __str__( self : Optional[Any] ):
"""simple docstring"""
return "(" + ",".join(map(__magic_name__ , self.__components ) ) + ")"
def __add__( self : Dict , __magic_name__ : Vector ):
"""simple docstring"""
lowerCAmelCase__ = len(self )
if size == len(__magic_name__ ):
lowerCAmelCase__ = [self.__components[i] + other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else:
raise Exception("must have the same size" )
def __sub__( self : Tuple , __magic_name__ : Vector ):
"""simple docstring"""
lowerCAmelCase__ = len(self )
if size == len(__magic_name__ ):
lowerCAmelCase__ = [self.__components[i] - other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : Optional[Any] , __magic_name__ : float ):
"""simple docstring"""
...
@overload
def __mul__( self : Union[str, Any] , __magic_name__ : Vector ):
"""simple docstring"""
...
def __mul__( self : Union[str, Any] , __magic_name__ : float | Vector ):
"""simple docstring"""
if isinstance(__magic_name__ , (float, int) ):
lowerCAmelCase__ = [c * other for c in self.__components]
return Vector(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ) and len(self ) == len(__magic_name__ ):
lowerCAmelCase__ = len(self )
lowerCAmelCase__ = [self.__components[i] * other.component(__magic_name__ ) for i in range(__magic_name__ )]
return sum(__magic_name__ )
else: # error case
raise Exception("invalid operand!" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return Vector(self.__components )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : int , __magic_name__ : float ):
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
lowerCAmelCase__ = value
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
lowerCAmelCase__ = [c**2 for c in self.__components]
return math.sqrt(sum(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Vector , __magic_name__ : bool = False ):
"""simple docstring"""
lowerCAmelCase__ = self * other
lowerCAmelCase__ = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def A ( UpperCamelCase_ : int ) -> Vector:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
return Vector([0] * dimension )
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Vector:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and (isinstance(UpperCamelCase_ , UpperCamelCase_ ))
lowerCAmelCase__ = [0] * dimension
lowerCAmelCase__ = 1
return Vector(UpperCamelCase_ )
def A ( UpperCamelCase_ : float , UpperCamelCase_ : Vector , UpperCamelCase_ : Vector ) -> Vector:
'''simple docstring'''
assert (
isinstance(UpperCamelCase_ , UpperCamelCase_ )
and isinstance(UpperCamelCase_ , UpperCamelCase_ )
and (isinstance(UpperCamelCase_ , (int, float) ))
)
return x * scalar + y
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Vector:
'''simple docstring'''
random.seed(UpperCamelCase_ )
lowerCAmelCase__ = [random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )]
return Vector(UpperCamelCase_ )
class A :
def __init__( self : Dict , __magic_name__ : list[list[float]] , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = matrix
lowerCAmelCase__ = w
lowerCAmelCase__ = h
def __str__( self : str ):
"""simple docstring"""
lowerCAmelCase__ = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , __magic_name__ : Matrix ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowerCAmelCase__ = []
for i in range(self.__height ):
lowerCAmelCase__ = [
self.__matrix[i][j] + other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : Dict , __magic_name__ : Matrix ):
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowerCAmelCase__ = []
for i in range(self.__height ):
lowerCAmelCase__ = [
self.__matrix[i][j] - other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Optional[Any] , __magic_name__ : float ):
"""simple docstring"""
...
@overload
def __mul__( self : Optional[Any] , __magic_name__ : Vector ):
"""simple docstring"""
...
def __mul__( self : Optional[int] , __magic_name__ : float | Vector ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ): # matrix-vector
if len(__magic_name__ ) == self.__width:
lowerCAmelCase__ = zero_vector(self.__height )
for i in range(self.__height ):
lowerCAmelCase__ = [
self.__matrix[i][j] * other.component(__magic_name__ )
for j in range(self.__width )
]
ans.change_component(__magic_name__ , sum(__magic_name__ ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(__magic_name__ , (int, float) ): # matrix-scalar
lowerCAmelCase__ = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__magic_name__ , self.__width , self.__height )
return None
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return self.__height
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return self.__width
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float ):
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
lowerCAmelCase__ = value
else:
raise Exception("change_component: indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
lowerCAmelCase__ = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__magic_name__ ) ):
lowerCAmelCase__ = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__magic_name__ , self.__width - 1 , self.__height - 1 ).determinant()
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int , __magic_name__ : int ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__magic_name__ , __magic_name__ )
else:
raise Exception("Indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowerCAmelCase__ = [
self.__matrix[0][y] * self.cofactor(0 , __magic_name__ ) for y in range(self.__width )
]
return sum(__magic_name__ )
def A ( UpperCamelCase_ : int ) -> Matrix:
'''simple docstring'''
lowerCAmelCase__ = [[0] * n for _ in range(UpperCamelCase_ )]
return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Matrix:
'''simple docstring'''
random.seed(UpperCamelCase_ )
lowerCAmelCase__ = [
[random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )] for _ in range(UpperCamelCase_ )
]
return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
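# A minimal usage sketch for the linear-algebra helpers above (illustrative
# values; the class and helper names assume the upstream naming — Vector,
# Matrix, zero_vector, square_zero_matrix — rather than the placeholder
# identifiers in this dump):
#
#   x = Vector([1, 2, 3])
#   y = Vector([3, 2, 1])
#   (x + y).component(0)   # 4
#   x * y                  # dot product: 10
#   m = square_zero_matrix(3)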
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
    def test_saved_model_creation_extended( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True)
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs) , num_out)
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states) , expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length)
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2 , 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest (unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts( self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1_01] + text + [1_02]
        assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')
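# Illustrative examples of what these patterns capture (not part of the original file):
#   _re_direct_key      matches a line like:  "models": ["ModelA"],                     -> key "models"
#   _re_indirect_key    matches a line like:  _import_structure["models"].extend([...]) -> key "models"
#   _re_strip_line      matches a line like:  "ModelA",                                 -> "ModelA"
#   _re_bracket_content matches any [stuff] and captures the inner "stuff".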
def get_indent( line ):
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
    index = 0
    lines = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['\n'.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore( key ):
    def _inner( x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects( objects , key=None ):
    # If no key is provided, we use a noop.
    def noop( x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
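# Usage sketch (illustrative, not part of the original file): constants sort first,
# then classes, then functions, each alphabetically with underscores ignored.
# >>> sort_objects(["BAR", "foo", "Baz"])
# ['BAR', 'Baz', 'foo']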
def sort_objects_in_import( import_statement ):
    # This inner function sort imports between [ ].
    def _replace( match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
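# Illustrative behaviour on a hypothetical one-line import entry (not part of the original file):
# >>> sort_objects_in_import('    "models": ["zeta", "Alpha", "BETA"],')
# '    "models": ["BETA", "Alpha", "zeta"],'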
def sort_imports( file , check_only=True ):
    with open(file , 'r' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.' )
            with open(file , 'w' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(f'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split(""".""" )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": eval_metric["""accuracy"""],
                    """f1""": eval_metric["""f1"""],
                    """train_loss""": total_loss.item() / len(train_dataloader ),
                    """epoch""": epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    parser.add_argument(
        """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
    parser.add_argument(
        """--project_dir""" , type=str , default="""logs""" , help="""Location on where to store experiment tracking logs and relevant project information""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
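# A typical invocation of this script (illustrative; the file name is an assumption,
# use the actual path of this script):
#   accelerate launch tracking.py --with_tracking --project_dir logs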
'''simple docstring'''
def perfect( number: int ) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
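# Worked examples (illustrative): 6 is perfect because 1 + 2 + 3 == 6,
# while 8 is not because 1 + 2 + 4 == 7.
# >>> perfect(6)
# True
# >>> perfect(8)
# False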
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
    print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
'''simple docstring'''
def sum_of_series( first_term: int , common_diff: int , num_of_terms: int ) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
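# Worked example (illustrative): for first_term=1, common_diff=1, num_of_terms=10,
# the formula gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.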
def main():
    """simple docstring"""
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
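# Illustrative example of the mapping performed above (hypothetical key):
# >>> rename_state_dict_key('encoder.layers.0.attention.q_lin.weight')
# 'encoder.layers.0.self_attn.q_proj.weight'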
def rename_layernorm_keys( sd ):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
    '''simple docstring'''
    key: KEY
    val: VAL
class _DeletedItem ( _Item ):
    '''simple docstring'''
    def __init__( self ):
        super().__init__(None , None )
    def __bool__( self ):
        return False
_deleted = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
    '''simple docstring'''
    def __init__( self , initial_block_size = 8 , capacity_factor = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key ):
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind ):
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind , key , val ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key , val ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key , val ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
def __len__( self ):
return self._len
def __iter__( self ):
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
        val_string = ''' ,'''.join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
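# Usage sketch (illustrative, assuming the class above):
# >>> hm = HashMap()
# >>> hm['key'] = 10
# >>> hm['key']
# 10
# >>> del hm['key']
# >>> len(hm)
# 0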
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__( self , poly_a=None , poly_b=None):
        '''simple docstring'''
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
        # The product
        self.product = self.__multiply()
    def __dft( self , which):
        '''simple docstring'''
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply( self):
        '''simple docstring'''
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self):
        '''simple docstring'''
        a = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
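    # Usage sketch (illustrative): coefficients are ordered from the constant term up,
    # so [1, 2] is 1 + 2x and [3, 4] is 3 + 4x; their product is 3 + 10x + 8x^2:
    # >>> FFT([1, 2], [3, 4]).product
    # [(3+0j), (10+0j), (8+0j)]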
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
    """simple docstring"""
def __init__( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int = 1_3 , lowerCAmelCase_ : int = 6_4 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 1_2_8 , lowerCAmelCase_ : Any=[1_6, 3_2, 6_4, 1_2_8] , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 3_7 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 1_0 , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 1_2_8 , lowerCAmelCase_ : List[int] = [2, 2, 2, 2] , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , ) -> str:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = encoder_stride
__lowerCAmelCase = num_attention_outputs
__lowerCAmelCase = embed_dim
__lowerCAmelCase = embed_dim + 1
__lowerCAmelCase = resolution
__lowerCAmelCase = depths
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = dim
__lowerCAmelCase = mlp_expansion_ratio
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFEfficientFormerModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp( self ):
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=3_7 )
def lowercase ( self : Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowercase ( self : Dict ) -> Any:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self : Tuple ) -> List[str]:
def check_hidden_states_output(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
__lowerCAmelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
__lowerCAmelCase = seq_length * self.model_tester.chunk_length
else:
__lowerCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
__lowerCAmelCase = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
__lowerCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict=False ) -> Dict:
__lowerCAmelCase = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase ( self : str ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowercase ( self : Dict ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Any ) -> List[Any]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
__lowerCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
__lowerCAmelCase = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
__lowerCAmelCase = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
__lowerCAmelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase ( self : str ) -> List[str]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowerCAmelCase = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowerCAmelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowerCAmelCase = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : str ) -> Optional[int]:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[str] ) -> List[Any]:
__lowerCAmelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
__lowerCAmelCase = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
__lowerCAmelCase = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 53 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
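        # e.g. with the defaults above (image_size=[30, 30], patch_size=2):
        # num_patches = (30 // 2) * (30 // 2) = 225, so
        # expected_seq_len = 225 + 1 + 10 = 236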
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = 'hidden_states'
@property
def lowerCAmelCase__ ( self: str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =4
UpperCAmelCase_ =8
UpperCAmelCase_ =7
UpperCAmelCase_ =floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        '''simple docstring'''
        torch.manual_seed(seed)
UpperCAmelCase_ =4
UpperCAmelCase_ =8
UpperCAmelCase_ =7
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
return (4, 8)
@property
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[int]:
'''simple docstring'''
return (4, 8)
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ ={
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
UpperCAmelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
UpperCAmelCase_ =model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ =self.model_class(**_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
UpperCAmelCase_ =model.to(_lowerCAmelCase )
if hasattr(_lowerCAmelCase , "set_default_attn_processor" ):
model.set_default_attn_processor()
UpperCAmelCase_ =self.get_dummy_seed_input()
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )[0]
UpperCAmelCase_ =output[0, :5].flatten().cpu()
print(_lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCAmelCase_ =torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1e-2 ) )
@slow
class A ( unittest.TestCase ):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        '''simple docstring'''
        torch.manual_seed(seed)
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =embedding_dim
UpperCAmelCase_ =num_embeddings
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
UpperCAmelCase_ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_seed_input(seed=_lowerCAmelCase )
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
UpperCAmelCase_ =sample[0, :8].flatten().cpu()
print(_lowerCAmelCase )
UpperCAmelCase_ =torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
| 54 |
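The get_dummy_seed_input helpers in the block above rely on a standard
reproducibility trick: reseed the global RNG immediately before drawing the
random tensors, so a fixed seed always yields the same inputs and therefore
the same expected output slices. In miniature:

import torch

def seeded_randn(shape, seed=0):
    torch.manual_seed(seed)  # fix the RNG state right before sampling
    return torch.randn(shape)

assert torch.equal(seeded_randn((2, 3)), seeded_randn((2, 3)))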
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 0 |
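To make the two sequence-pair helpers above concrete, the RoBERTa-style
layout they build, traced with made-up token ids:

# single sequence:  <s> A </s>        pair:  <s> A </s></s> B </s>
cls_id, sep_id = 5, 6
ids_a, ids_b = [10, 11], [20]
print([cls_id] + ids_a + [sep_id])                             # [5, 10, 11, 6]
print([cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id])  # [5, 10, 11, 6, 6, 20, 6]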
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 55 |
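The per-example length computation in save_len_file above reduces to
counting non-pad tokens per row; in isolation:

import torch

pad = 0
batch = torch.tensor([[5, 6, 7, 0, 0], [8, 0, 0, 0, 0]])
print(batch.ne(pad).sum(1).tolist())  # [3, 1]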
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 0 |
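The cache layout those assertions traverse, sketched as comments (the repo
name matches the test constant; the hash is a placeholder):

# TRANSFORMERS_CACHE/
#   models--hf-internal-testing--tiny-random-bert/
#     blobs/                    # content-addressed file bodies
#     refs/main                 # text file holding the current commit hash
#     snapshots/<commit>/...    # per-revision trees of links into blobs/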
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 56 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 0 |
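A hypothetical invocation of the helper above ('t5-small' and the overridden
config values are illustrative, not taken from the snippet):

save_randomly_initialized_version('t5-small', './t5-random-init', d_model=64, num_layers=2)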
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 57 |
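A quick sanity check of the formula above with typical silicon values
(concentrations in cm^-3; only the ratio enters, so the units cancel):

print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))
# ≈ 0.81 V at T = 300 K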
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = 'tapas'
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_lowercase ):
snake_case_ : int = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
snake_case_ : str = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_lowercase ):
snake_case_ : str = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
snake_case_ : Dict = FlaxAutoModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_lowercase )
snake_case_ : Optional[int] = FlaxBertModel.from_pretrained(_lowercase )
snake_case_ : int = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowercase ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
snake_case_ : Any = AutoTokenizer.from_pretrained(_lowercase )
snake_case_ : Tuple = FlaxRobertaModel.from_pretrained(_lowercase )
snake_case_ : Union[str, Any] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_lowercase ):
return model(**_lowercase )
eval(**_lowercase ).block_until_ready()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
snake_case_ : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case_ : List[Any] = FlaxAutoModel.from_pretrained(_lowercase , revision="""aaaaaa""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
snake_case_ : int = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(_lowercase , """Use `from_pt=True` to load this model""" ):
snake_case_ : Dict = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 58 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
if img.dtype == np.uinta:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
__A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0)  # hwc -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class Preprocess:
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
__A ,__A : int = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case ) | 8 | 0 |
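The shortest-edge arithmetic in ResizeShortestEdge above, isolated as a
pure-Python sketch (same computation, simplified names):

def resized_hw(h, w, size, max_size):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(resized_hw(480, 640, 400, 600))  # (400, 533): short edge hits 400, long edge stays under 600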
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 |
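The _LazyModule pattern above in miniature — attribute access triggers the
real submodule import on first use (a simplified sketch, not the
transformers implementation):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only called when `attr` is not already set on the module
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")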
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 0 |
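For the adjacency list above, removing vertex 2, 3 or 5 disconnects the
graph, so compute_ap prints 2, 3 and 5. The version above initializes
low-link values with vertex indices rather than discovery times; a more
conventional sketch with explicit discovery times, for comparison (not from
the original file):

def articulation_points(graph):
    n = len(graph)
    visited = [False] * n
    disc = [0] * n   # discovery times
    low = [0] * n    # low-link values
    is_art = [False] * n
    timer = 0

    def dfs(at, parent):
        nonlocal timer
        visited[at] = True
        disc[at] = low[at] = timer
        timer += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                # non-root vertex: no back edge climbs above `at`
                if parent != -1 and low[to] >= disc[at]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], disc[to])
        # root vertex: articulation point iff it has 2+ DFS children
        if parent == -1 and children > 1:
            is_art[at] = True

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v, flag in enumerate(is_art) if flag]

print(articulation_points(data))  # [2, 3, 5]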
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        '''simple docstring'''
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 60 |
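To make the definition in the docstring above concrete for the binary case,
MCC can be computed straight from confusion-matrix counts (a standalone
sketch, not part of the metric class):

def mcc_binary(tp, tn, fp, fn):
    num = tp * tn - fp * fn
    den = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return num / den if den else 0.0

print(mcc_binary(tp=4, tn=3, fp=1, fn=2))  # ≈ 0.41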
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group'
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                ' found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 0 |
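The '*' substitution in recursively_load_weights above, traced on a single
key (the fairseq parameter name is illustrative):

name = "encoder.layers.3.self_attn.k_proj.weight"
key = "self_attn.k_proj"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
print("encoder.layers.*.attention.k_proj".replace("*", layer_index))
# -> encoder.layers.3.attention.k_proj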