"""Image processor class for GLPN."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
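

# The resizing rule above in isolation (an illustration; the numbers are
# arbitrary): height and width are floored to the nearest multiple of
# `size_divisor`, e.g. with size_divisor=32 an input of 1080 x 1920 becomes
# (1080 // 32) * 32 x (1920 // 32) * 32, i.e. 1056 x 1920, so both sides of the
# output stay divisible by 32.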
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
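

# Usage sketch distilled from the tests above (an addition; it assumes network
# access to the Hugging Face Hub, and the German target text is illustrative):
#
#     tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tokenizer(["I am a small frog"], return_tensors="pt")
#     labels = tokenizer(text_target=["Ich bin ein kleiner Frosch"]).input_ids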
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__SCREAMING_SNAKE_CASE = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class __UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : bool , UpperCAmelCase : str = None , UpperCAmelCase : list = None ) -> Optional[Any]:
lowerCAmelCase :List[Any] = None
lowerCAmelCase :Optional[Any] = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
lowerCAmelCase :Tuple = os.path.abspath('examples' )
for item in os.listdir(SCREAMING_SNAKE_CASE_ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase :str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE_ , feature_script=SCREAMING_SNAKE_CASE_ , tested_section='main()' if parser_only else 'training_function()' , ):
lowerCAmelCase :Optional[int] = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :List[str] = '\n'.join(SCREAMING_SNAKE_CASE_ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase :Any = diff.replace(SCREAMING_SNAKE_CASE_ , '' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '' )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
self.one_complete_example('complete_nlp_example.py' , SCREAMING_SNAKE_CASE_ )
self.one_complete_example('complete_nlp_example.py' , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase :List[Any] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
lowerCAmelCase :Any = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.one_complete_example('complete_cv_example.py' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class __UpperCamelCase ( __lowercase ):
lowercase_ : str = False
@classmethod
def UpperCAmelCase__ ( cls : str ) -> List[str]:
super().setUpClass()
lowerCAmelCase :int = tempfile.mkdtemp()
lowerCAmelCase :Optional[int] = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase :Dict = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ) -> Dict:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase :List[Any] = f"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
lowerCAmelCase :List[str] = f"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n """.split()
lowerCAmelCase :Tuple = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def UpperCAmelCase__ ( self : str ) -> Tuple:
lowerCAmelCase :Any = f"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n """.split()
lowerCAmelCase :Optional[Any] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
self.assertNotIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase :Dict = f"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n """.split()
lowerCAmelCase :Dict = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
if torch.cuda.is_available():
lowerCAmelCase :int = torch.cuda.device_count()
else:
lowerCAmelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
else:
self.assertIn('epoch 0:' , SCREAMING_SNAKE_CASE_ )
self.assertIn('epoch 1:' , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> str:
lowerCAmelCase :Dict = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCAmelCase :List[Any] = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :str = re.findall('({.+})' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :Any = [r for r in results if 'accuracy' in r][-1]
lowerCAmelCase :List[str] = ast.literal_eval(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(results['accuracy'] , 0.7_5 )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase :int = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase :str = f"""\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , 'tracking' ) ) )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
lowerCAmelCase :Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def UpperCAmelCase__ ( self : str ) -> Dict:
lowerCAmelCase :int = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
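

# Shell form of the launcher invocation the tests above exercise (an
# illustration; it assumes the accelerate repo root as the working directory):
#
#   accelerate launch --config_file <path to default_config.yml> \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <output dir>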
"""Pure-Python implementation of the MD5 hash algorithm."""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hexadecimal representation of a non-negative int."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Encode the message as a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT of a non-negative int, restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit int to the left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character hexadecimal MD5 digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
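
    # Quick sanity check against the standard library (an addition to the
    # original script): both sides are the canonical MD5 digest of the input.
    import hashlib

    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")
    print(md5_me(b"The quick brown fox jumps over the lazy dog").decode())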
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
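

# Usage sketch (an addition; in practice transformers installs this callback
# automatically when a Jupyter environment is detected):
#
#     from transformers import Trainer
#     trainer = Trainer(..., callbacks=[NotebookProgressCallback()])
#
# The bar can also be driven by hand inside a notebook:
#
#     bar = NotebookProgressBar(100)
#     for step in range(101):
#         bar.update(step)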
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
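

if __name__ == "__main__":
    # Quick demo (an addition; the repo and file names are arbitrary examples).
    # hf_hub_url only builds a URL string, so no network access is needed.
    print(hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet"))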
class EditDistance:
    """
    Dynamic-programming solver for the minimum edit (Levenshtein) distance
    between two words, with both top-down (memoized) and bottom-up variants.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
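
    # Known worked example (an addition): "kitten" -> "sitting" takes 3 edits
    # (substitute k->s, substitute e->i, insert g), so both variants return 3.
    assert EditDistance().min_dist_top_down("kitten", "sitting") == 3
    assert EditDistance().min_dist_bottom_up("kitten", "sitting") == 3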
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
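

# Minimal 4-bit loading sketch distilled from the tests above (assumptions: a
# CUDA GPU plus the accelerate and bitsandbytes packages are available):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#     )
#     inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
#     print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))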
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
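

# Worked example of the padding rule above (an illustration, not part of the
# module's API): for a 7-pixel dimension with stride 2 and kernel 3, 7 % 2 == 1,
# so pad_along = max(3 - 1, 0) = 2, split as (top, bottom) = (1, 1); the
# convolution then yields ceil(7 / 2) = 4 output pixels, matching TF "SAME" mode.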
class __snake_case ( nn.Module ):
    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class __snake_case ( __lowercase ):
_a = MobileNetVaConfig
_a = load_tf_weights_in_mobilenet_va
_a = "mobilenet_v1"
_a = "pixel_values"
_a = False
    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
A__ : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
A__ : str = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' ,__lowercase ,)
class __snake_case ( __lowercase ):
def __init__( self : str , A_ : MobileNetVaConfig , A_ : bool = True):
super().__init__(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : List[str] = config
lowerCAmelCase_ : Tuple = 3_2
lowerCAmelCase_ : int = max(int(depth * config.depth_multiplier) , config.min_depth)
lowerCAmelCase_ : Optional[Any] = MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=config.num_channels , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=2 , )
lowerCAmelCase_ : int = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCAmelCase_ : List[str] = nn.ModuleList()
for i in range(1_3):
lowerCAmelCase_ : Optional[int] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCAmelCase_ : Optional[int] = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=strides[i] , groups=SCREAMING_SNAKE_CASE_ , ))
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=1 , ))
lowerCAmelCase_ : Dict = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase__ ( self : Optional[int] , A_ : int):
raise NotImplementedError
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[torch.Tensor] = None , A_ : Optional[bool] = None , A_ : Optional[bool] = None , ):
lowerCAmelCase_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''')
lowerCAmelCase_ : str = self.conv_stem(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
lowerCAmelCase_ : str = layer_module(SCREAMING_SNAKE_CASE_)
if output_hidden_states:
lowerCAmelCase_ : int = all_hidden_states + (hidden_states,)
lowerCAmelCase_ : List[str] = hidden_states
if self.pooler is not None:
lowerCAmelCase_ : str = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE_) , start_dim=1)
else:
lowerCAmelCase_ : List[str] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , )
@add_start_docstrings(
'''\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' ,__lowercase ,)
class __snake_case ( __lowercase ):
def __init__( self : int , A_ : MobileNetVaConfig):
super().__init__(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : int = config.num_labels
lowerCAmelCase_ : Dict = MobileNetVaModel(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Any = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCAmelCase_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : str = nn.Linear(SCREAMING_SNAKE_CASE_ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self : Tuple , A_ : Optional[torch.Tensor] = None , A_ : Optional[bool] = None , A_ : Optional[torch.Tensor] = None , A_ : Optional[bool] = None , ):
lowerCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Any = self.mobilenet_va(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ : Optional[Any] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE_))
lowerCAmelCase_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ : Union[str, Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ : Optional[int] = '''single_label_classification'''
else:
lowerCAmelCase_ : List[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase_ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze())
else:
lowerCAmelCase_ : List[Any] = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ : Any = CrossEntropyLoss()
lowerCAmelCase_ : Tuple = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ : Tuple = BCEWithLogitsLoss()
lowerCAmelCase_ : Tuple = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
if not return_dict:
lowerCAmelCase_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states , )
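# End-to-end usage sketch for the classifier above, assuming the upstream
# transformers names (AutoImageProcessor, MobileNetV1ForImageClassification)
# and the public "google/mobilenet_v1_1.0_224" checkpoint; "cat.jpg" is a
# placeholder path:
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    classifier = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

    inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
    with torch.no_grad():
        logits = classifier(**inputs).logits
    print(classifier.config.id2label[logits.argmax(-1).item()])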
| 171 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
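# What the tests above exercise, as a minimal standalone sketch. It assumes
# the upstream transformers name Wav2Vec2ProcessorWithLM; the checkpoint is
# the same test fixture used above, and the logits are random stand-ins:
if __name__ == "__main__":
    from transformers import Wav2Vec2ProcessorWithLM

    lm_processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    dummy_logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy CTC logits

    # The pool must be created *after* the processor so the LM is visible to
    # the worker processes (see the comment in the parameterized test above).
    with get_context("fork").Pool() as pool:
        decoded = lm_processor.batch_decode(dummy_logits, pool)
    print(decoded.text)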
| 56 | 0 |
def check_bouncy(n: int) -> bool:
    """A positive integer is "bouncy" when its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Project Euler 112: the least number at which the proportion of bouncy
    numbers first reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
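# Worked examples from the Project Euler 112 statement: 134468 is
# "increasing" (digits never decrease) and 66420 is "decreasing" (digits
# never increase); 155349 is neither, hence bouncy.
assert not check_bouncy(134468)
assert not check_bouncy(66420)
assert check_bouncy(155349)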
| 181 |
def power(base: int, exponent: int) -> float:
    """Recursively raise ``base`` to a non-negative ``exponent``."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
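# The recursion above performs `exponent` multiplications. A logarithmic-time
# variant of the same idea, via exponentiation by squaring (a sketch added
# here for illustration; it is not part of the original script):
def fast_power(base: float, exponent: int) -> float:
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)  # reuse the half-size result
    return half * half if exponent % 2 == 0 else half * half * base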
| 56 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a flattened Flax key/tensor pair to PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def lowerCAmelCase_ ( lowercase: Optional[Any] , lowercase: List[Any] , lowercase: Optional[int] ) -> List[Any]:
'''simple docstring'''
if "metadata" in layer:
_UpperCamelCase: List[str] = layer.split('''metadata''' )
_UpperCamelCase: Union[str, Any] = ''''''.join(split_layer[0] )[:-1]
_UpperCamelCase: Optional[Any] = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
_UpperCamelCase: Dict = layer.split('''kvstore''' )
_UpperCamelCase: Tuple = ''''''.join(split_layer[0] )[:-1]
_UpperCamelCase: Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
_UpperCamelCase: Dict = layer.split('''/''' )
_UpperCamelCase: List[Any] = '''/'''.join(split_layer[:-1] )
_UpperCamelCase: Union[str, Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_UpperCamelCase: List[str] = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
_UpperCamelCase: Any = '''file'''
else:
_UpperCamelCase: Dict = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCAmelCase_ ( lowercase: Optional[int] , lowercase: Dict ) -> Any:
'''simple docstring'''
_UpperCamelCase: Any = rename_keys(lowercase__ )
_UpperCamelCase: Any = {}
for k, v in current_block.items():
_UpperCamelCase: Any = v
_UpperCamelCase: Optional[int] = new_current_block
torch.save(lowercase__ , lowercase__ )
def lowerCAmelCase_ ( lowercase: List[Any] , lowercase: Tuple , lowercase: List[str] , lowercase: Optional[int] , lowercase: str = WEIGHTS_NAME ) -> str:
'''simple docstring'''
_UpperCamelCase: Any = convert_file_size_to_int(lowercase__ )
_UpperCamelCase: int = []
_UpperCamelCase: List[Any] = {}
_UpperCamelCase: str = 0
_UpperCamelCase: str = 0
os.makedirs(lowercase__ , exist_ok=lowercase__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
_UpperCamelCase: Optional[int] = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
_UpperCamelCase: Dict = flatten_dict(lowercase__ , sep='''/''' )
_UpperCamelCase: Tuple = {}
for layer in checkpoint_info.keys():
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Union[str, Any] = get_key_and_tensorstore_dict(
lowercase__ , lowercase__ , lowercase__ )
if curr_real_layer_name in all_layers:
_UpperCamelCase: Union[str, Any] = content
else:
_UpperCamelCase: str = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCamelCase: Optional[int] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCamelCase: List[Any] = torch.tensor(lowercase__ )
_UpperCamelCase: List[str] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCamelCase , _UpperCamelCase: Union[str, Any] = rename_base_flax_keys(tuple(key.split('''/''' ) ) , lowercase__ )
_UpperCamelCase: Tuple = '''/'''.join(lowercase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_UpperCamelCase: Union[str, Any] = os.path.join(
lowercase__ , weights_name.replace('''.bin''' , F"""-{len(lowercase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCamelCase: Any = {}
_UpperCamelCase: List[Any] = 0
_UpperCamelCase: int = raw_weights.to(getattr(lowercase__ , lowercase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCamelCase: Optional[Any] = os.path.join(lowercase__ , weights_name.replace('''.bin''' , F"""-{len(lowercase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCamelCase: Any = {}
_UpperCamelCase: List[Any] = {}
for idx, shard in enumerate(lowercase__ ):
_UpperCamelCase: Optional[Any] = weights_name.replace(
'''.bin''' , F"""-{idx+1:05d}-of-{len(lowercase__ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
_UpperCamelCase: Optional[int] = os.path.join(lowercase__ , weights_name.replace('''.bin''' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
_UpperCamelCase: Optional[int] = shard
for key in shard:
_UpperCamelCase: List[Any] = shard_file
# Add the metadata
_UpperCamelCase: Optional[Any] = {'''total_size''': total_size}
_UpperCamelCase: Dict = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowercase__ , lowercase__ ) , '''w''' , encoding='''utf-8''' ) as f:
_UpperCamelCase: str = json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '''\n'''
f.write(lowercase__ )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
UpperCAmelCase_ = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
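# For reference, the index JSON written by the sharding loop above follows
# the standard sharded-checkpoint layout. The concrete parameter names, file
# count, and byte size below are illustrative only, not taken from a real run:
#
#     {
#         "metadata": {"total_size": 26953662464},
#         "weight_map": {
#             "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00072.bin",
#             "encoder.block.0.layer.1.mlp.router.classifier.weight": "pytorch_model-00002-of-00072.bin",
#             ...one entry per parameter tensor...
#         },
#     }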
| 271 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from the
    two initial guesses ``x0`` and ``x1``."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
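# One secant step by hand for f(x) = x**3 - 2*x - 5 from x0 = 3, x1 = 3.5:
#   f(3) = 16, f(3.5) = 30.875
#   x2 = 3.5 - 30.875 / ((30.875 - 16) / 0.5) = 3.5 - 30.875 / 29.75 ≈ 2.4622
# Iterating from there converges to the real root near x ≈ 2.0946.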
| 56 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Funnel Transformer tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
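# Minimal usage sketch for the class above ("funnel-transformer/small" is one
# of the public checkpoints listed in the vocab map; the exact ids depend on
# the vocabulary, so only the token_type_ids pattern matters):
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    encoded = tokenizer("Hello world", "How are you?")
    # Funnel marks the leading <cls> with cls_token_type_id (2), then uses 0
    # for the first segment and 1 for the second:
    print(encoded["token_type_ids"])  # e.g. [2, 0, 0, 0, 1, 1, 1, 1, 1]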
| 657 |
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 56 | 0 |
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide ``number_of_bytes`` into ``partitions`` contiguous byte ranges
    (e.g. for a segmented download)."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
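# Worked example: splitting a 100-byte payload into 4 partitions gives
#   allocation_num(100, 4) -> ['1-25', '26-50', '51-75', '76-100']
# where only the last range absorbs the remainder bytes.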
| 588 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
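# Worked example: ["2", "3", "1", "*", "+", "9", "-"] encodes 2 + (3 * 1) - 9:
#   evaluate_postfix(["2", "3", "1", "*", "+", "9", "-"]) -> -4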
| 56 | 0 |
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the directed graph ``g``
    (given as adjacency lists), using Tarjan's algorithm."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
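# Tarjan's algorithm runs in O(V + E): every vertex is pushed and popped once
# and every edge is examined once. Components are emitted in reverse
# topological order of the condensation graph, which is why the sinks [5] and
# [6] come out before [4] and the cycle {0, 1, 2, 3} in the test above.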
| 640 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest square of 1s in ``mat``: plain top-down recursion."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in ``dp_array``."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP over an (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, not alias: the next iteration must read the old row
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
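# All four variants share one recurrence: when mat[row][col] == 1, the largest
# square whose top-left corner sits there has side 1 + min(right, diagonal,
# down), so a single 0 in any of those three neighbours caps the square. The
# bottom-up table costs O(rows * cols) time and space; the last variant keeps
# only two rows of the table.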
| 56 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
__UpperCAmelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : Optional[str] = field(
default=__lowercase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase )} , )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_snake_case : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_snake_case : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_snake_case : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_snake_case : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''} )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
_snake_case : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
_snake_case : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_snake_case : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
_snake_case : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
_snake_case : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_snake_case : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_snake_case : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach a `chinese_ref` column (whole-word segmentation hints) to `dataset`."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
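# Each line of the ref file is a JSON value describing which sub-token
# positions belong to the same Chinese word (as produced by a word segmenter
# such as LTP), so DataCollatorForWholeWordMask can mask whole words together.
# The exact layout is an assumption here; see the companion run_chinese_ref.py
# script in the transformers examples for how these files are generated.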
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase_ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCAmelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
UpperCAmelCase_ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
UpperCAmelCase_ : Dict = {}
if data_args.train_file is not None:
UpperCAmelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase_ : int = data_args.validation_file
UpperCAmelCase_ : Optional[Any] = data_args.train_file.split('.' )[-1]
if extension == "txt":
UpperCAmelCase_ : Optional[int] = 'text'
UpperCAmelCase_ : Optional[Any] = load_dataset(lowercase__ , data_files=lowercase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : int = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCAmelCase_ : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
UpperCAmelCase_ : int = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
UpperCAmelCase_ : Any = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCAmelCase_ : int = AutoModelForMaskedLM.from_config(lowercase__ )
model.resize_token_embeddings(len(lowercase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCAmelCase_ : List[str] = datasets['train'].column_names
else:
UpperCAmelCase_ : Tuple = datasets['validation'].column_names
UpperCAmelCase_ : Union[str, Any] = 'text' if 'text' in column_names else column_names[0]
UpperCAmelCase_ : Tuple = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(__snake_case : Any ):
# Remove empty lines
UpperCAmelCase_ : Union[str, Any] = [line for line in examples['text'] if len(lowercase__ ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )
UpperCAmelCase_ : List[Any] = datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCAmelCase_ : Optional[Any] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCAmelCase_ : Union[str, Any] = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
UpperCAmelCase_ : List[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCAmelCase_ : Any = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : str = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase_ : List[str] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase_ : Any = model_args.model_name_or_path
else:
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase_ : Optional[Any] = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
UpperCAmelCase_ : Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCAmelCase_ : str = trainer.evaluate()
UpperCAmelCase_ : int = math.exp(eval_output['eval_loss'] )
UpperCAmelCase_ : List[str] = perplexity
UpperCAmelCase_ : List[Any] = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
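

def _wwm_collator_demo():
    '''Illustrative sketch, added for clarity and not part of the original script: shows how
    the whole-word-masking collator used above masks every sub-token of a word together.
    The checkpoint name is an assumption for the demo; any WordPiece tokenizer would do.'''
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask

    demo_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
    demo_collator = DataCollatorForWholeWordMask(tokenizer=demo_tokenizer , mlm_probability=0.15 )
    # The collator consumes a list of tokenized features carrying 'input_ids'.
    features = [demo_tokenizer('transformers are unbelievable' )]
    batch = demo_collator(features )
    # Sub-tokens of a masked word (e.g. 'un', '##belie', '##vable') are replaced as a unit;
    # positions that stay unmasked carry the ignore index -100 in the labels.
    return batch['input_ids'], batch['labels']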
| 406 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
    with lz4.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
    with py7zr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
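

def _parquet_roundtrip_demo(parquet_file_path ):
    """Sketch added for illustration, not one of the original fixtures: reading back the
    parquet file written above; the schema pins col_2/col_3 to int64/float64."""
    table = pq.read_table(parquet_file_path )
    assert table.column_names == ['col_1', 'col_2', 'col_3']
    assert table.num_rows == len(DATA )
    return table.to_pydict()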
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
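

def _fixture_usage_demo(text_file ):
    """Sketch added for illustration, not an original test: pytest injects a session-scoped
    fixture by parameter name, so a test only names the fixture it needs. 'text_file' is an
    assumption for the name the plain-text fixture above is registered under in the real
    conftest."""
    with open(text_file , encoding='utf-8' ) as f:
        assert f.read() == FILE_CONTENT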
| 56 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin ):
    """simple docstring"""

    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , *args , **kwargs ):
        audio = kwargs.pop('audio' , None )
        text = kwargs.pop('text' , None )
        text_target = kwargs.pop('text_target' , None )
        audio_target = kwargs.pop('audio_target' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets['input_ids']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def pad( self , *args , **kwargs ):
        input_values = kwargs.pop('input_values' , None )
        input_ids = kwargs.pop('input_ids' , None )
        labels = kwargs.pop('labels' , None )
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets['input_ids']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask' )
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
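

def _speecht5_processor_demo():
    """Usage sketch, added for illustration: exercising the mutually exclusive branches of
    the __call__ defined above. The checkpoint name is an assumption for the demo."""
    import numpy as np
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_asr' )
    speech = np.zeros(16000 , dtype=np.float32 )  # one second of silence at 16 kHz
    # ASR-style call: audio becomes the model input, text_target becomes 'labels'.
    inputs = processor(audio=speech , text_target='hello world' , sampling_rate=16000 )
    return inputs['input_values'], inputs['labels']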
| 135 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig ):
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
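

def _camembert_onnx_axes_demo():
    """Sketch, added for illustration: the dynamic ONNX axes exposed above depend only on
    the task; the config below uses all defaults."""
    config = CamembertConfig()  # defaults: vocab_size=30522, hidden_size=768, ...
    onnx_config = CamembertOnnxConfig(config , task='multiple-choice' )
    # -> OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])
    return onnx_config.inputs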
| 56 | 0 |
"""simple docstring"""
from math import sqrt
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase :Any = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase :str = False
for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase :Optional[Any] = False
break
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool"
return status
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase :Optional[Any] = list(range(2 , n + 1 ) )
lowerCAmelCase :Tuple = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase :Union[str, Any] = 0
# filters actual prime numbers.
lowerCAmelCase :Dict = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase :List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowercase__ ):
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase :Optional[int] = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase :Optional[int] = 2
lowerCAmelCase :List[str] = number
if number == 0 or number == 1:
ans.append(lowercase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowercase__ ):
while quotient != 1:
if is_prime(lowercase__ ) and (quotient % factor == 0):
ans.append(lowercase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase :List[Any] = 0
# prime factorization of 'number'
lowerCAmelCase :Optional[int] = prime_factorization(lowercase__ )
lowerCAmelCase :Union[str, Any] = max(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase :Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase :List[str] = prime_factorization(lowercase__ )
lowerCAmelCase :Tuple = min(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert (
isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ )
), "'number' must been an int, even and > 2"
lowerCAmelCase :List[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase :List[Any] = get_prime_numbers(lowercase__ )
lowerCAmelCase :str = len(lowercase__ )
# run variable for while-loops.
lowerCAmelCase :int = 0
lowerCAmelCase :Optional[Any] = None
# exit variable. for break up the loops
lowerCAmelCase :Union[str, Any] = True
while i < len_pn and loop:
lowerCAmelCase :int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase :int = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (len(lowercase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase :str = 0
while numbera != 0:
lowerCAmelCase :Optional[int] = numbera % numbera
lowerCAmelCase :Any = numbera
lowerCAmelCase :List[str] = rest
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase :Optional[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase :Tuple = prime_factorization(lowercase__ )
lowerCAmelCase :Optional[Any] = prime_factorization(lowercase__ )
elif numbera == 1 or numbera == 1:
lowerCAmelCase :Union[str, Any] = []
lowerCAmelCase :List[str] = []
lowerCAmelCase :List[Any] = max(lowercase__ , lowercase__ )
lowerCAmelCase :Optional[int] = 0
lowerCAmelCase :int = 0
lowerCAmelCase :Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase :int = prime_fac_a.count(lowercase__ )
lowerCAmelCase :str = prime_fac_a.count(lowercase__ )
for _ in range(max(lowercase__ , lowercase__ ) ):
ans *= n
else:
lowerCAmelCase :List[str] = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase :List[str] = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase :Any = 0
lowerCAmelCase :Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowercase__ ):
ans += 1
# precondition
assert isinstance(lowercase__ , lowercase__ ) and is_prime(
lowercase__ ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert (
is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase :List[str] = p_number_a + 1 # jump to the next number
lowerCAmelCase :Optional[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
while number < p_number_a:
ans.append(lowercase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and ans[0] != p_number_a
and ans[len(lowercase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase :Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase__ )
# precondition
assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase :List[str] = get_divisors(lowercase__ )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (divisors[0] == 1)
and (divisors[len(lowercase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase :int = gcd(abs(lowercase__ ) , abs(lowercase__ ) )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase :Optional[int] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase ( a__ ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase :int = 0
lowerCAmelCase :Optional[int] = 1
lowerCAmelCase :Union[str, Any] = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase :Tuple = ans
ans += fiba
lowerCAmelCase :Optional[int] = tmp
return ans
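

def _primelib_demo():
    '''Worked examples, added for illustration, for the helpers above.'''
    assert is_prime(97 ) and not is_prime(1 )
    assert goldbach(28 ) == [5, 23]  # two primes summing to an even number > 2
    assert gcd(24 , 36 ) == 12 and kg_v(24 , 36 ) == 72
    assert simplify_fraction(10 , 20 ) == (1, 2)
    assert get_divisors(12 ) == [1, 2, 3, 4, 6, 12] and is_perfect_number(28 )
    assert factorial(5 ) == 120 and fib(10 ) == 89  # fib(1) == 1 in this indexing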
| 553 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig ):
    model_type = "timesformer"
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
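

def _timesformer_patch_count_demo():
    '''Sketch, added for illustration: how many patch tokens per clip the defaults above
    imply, computed directly from the config attributes.'''
    config = TimesformerConfig()
    patches_per_frame = (config.image_size // config.patch_size) ** 2  # (224 // 16) ** 2 == 196
    return patches_per_frame * config.num_frames  # 196 * 8 == 1568 tokens (plus one CLS token)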
| 56 | 0 |
def nor_gate(input_1: int , input_2: int ):
    return int(input_1 == input_2 == 0 )


def main():
    print("""Truth Table of NOR Gate:""" )
    print("""| Input 1 | Input 2 | Output |""" )
    print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
    print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
    print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
    print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
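

def not_gate(input_1: int ):
    '''Added illustration: NOR is functionally complete, so NOT falls out of nor_gate
    directly via NOT(a) = NOR(a, a).'''
    return nor_gate(input_1 , input_1 )


def or_gate(input_1: int , input_2: int ):
    '''Added illustration: OR(a, b) = NOT(NOR(a, b)).'''
    return not_gate(nor_gate(input_1 , input_2 ) )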
| 6 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__( self , data : Any ) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__( self ) -> None:
        self.head = None

    def print_list( self ) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=' ' )
            temp = temp.next
        print()

    def push( self , new_data : Any ) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_1 : Any , node_data_2 : Any ) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
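

def _swap_demo():
    '''Added illustration: swap_nodes above exchanges payloads (two O(n) searches plus an
    O(1) swap) instead of relinking nodes, and it is a no-op when either value is absent.'''
    demo = LinkedList()
    for value in (3, 2, 1):
        demo.push(value )  # list is now 1 2 3
    demo.swap_nodes(1 , 3 )  # list is now 3 2 1
    demo.swap_nodes(1 , 99 )  # 99 is absent, so the list is unchanged
    return demo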
| 56 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
"""simple docstring"""
    data : int
    next_node : Node | None
class SortedLinkedList:
"""simple docstring"""
    def __init__( self , values ) ->None:
        self.head = None
        for i in sorted(values , reverse=True ):
            self.head = Node(i , self.head )
def __iter__( self ) ->Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self ) ->int:
return sum(1 for _ in self )
def __str__( self ) ->str:
return " -> ".join([str(SCREAMING_SNAKE_CASE_ ) for node in self] )
def merge_lists( sll_one , sll_two ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
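

def merge_lists_linear(sll_one , sll_two ):
    '''Alternative sketch, added for illustration: merge_lists above concatenates and
    re-sorts in O((m + n) log(m + n)); a two-pointer walk over the already sorted inputs
    is O(m + n). Returned as a plain list to keep the sketch short.'''
    it_one , it_two = iter(sll_one ) , iter(sll_two )
    one , two = next(it_one , None ) , next(it_two , None )
    merged = []
    while one is not None or two is not None:
        if two is None or (one is not None and one <= two):
            merged.append(one )
            one = next(it_one , None )
        else:
            merged.append(two )
            two = next(it_two , None )
    return merged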
| 312 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
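# Added illustration (kept as a comment so the lazy registration above is unaffected):
# what the sys.modules swap buys you. Submodules are imported only on first attribute
# access, so the first line below is cheap and the second triggers the torch import:
#
#   from transformers.models.tapas import TapasConfig  # no torch import yet
#   from transformers.models.tapas import TapasModel   # modeling_tapas loaded here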
| 56 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
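

def _is_stale(days_since_updated , days_since_creation , label_names ):
    '''Refactoring sketch, added for illustration and not called by main: the staleness
    thresholds above as a pure predicate, unit-testable without the GitHub API.'''
    exempt = any(name.lower() in LABELS_TO_EXEMPT for name in label_names )
    return days_since_updated > 23 and days_since_creation >= 30 and not exempt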
| 171 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
    def input_shape( self ):
return (3, 32, 32)
@property
    def output_shape( self ):
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_forward_signature( self ):
        pass
    def test_training( self ):
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing( self ):
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict )
        # clone model
        model_2.load_state_dict(model.state_dict() )
        model_2.to(torch_device )
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_2 = dict(model_2.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_2[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self ):
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase ):
def get_file_format(self, seed, shape):
return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
return image
def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
revision = 'fp16' if fp16 else None
torch_dtype = torch.float16 if fp16 else torch.float32
model = AutoencoderKL.from_pretrained(
model_id , subfolder='vae' , torch_dtype=torch_dtype , revision=revision , )
model.to(torch_device ).eval()
return model
def get_generator(self, seed=0):
if torch_device == "mps":
return torch.manual_seed(seed )
return torch.Generator(device=torch_device ).manual_seed(seed )
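# Note (added): passing fp16=True loads the half-precision weights published
# under the "fp16" revision of the Hub repo and casts inputs to torch.float16,
# roughly halving VRAM use at a small cost in numerical precision.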
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed )
generator = self.get_generator(seed )
with torch.no_grad():
sample = model(image , generator=generator , sample_posterior=True ).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True )
image = self.get_sd_image(seed , fp16=True )
generator = self.get_generator(seed )
with torch.no_grad():
sample = model(image , generator=generator , sample_posterior=True ).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed )
with torch.no_grad():
sample = model(image ).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_decode(self, seed, expected_slice):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
with torch.no_grad():
sample = model.decode(encoding ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True )
encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
with torch.no_grad():
sample = model.decode(encoding ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice )
assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
model = self.get_sd_vae_model(fp16=True )
encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
with torch.no_grad():
sample = model.decode(encoding ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(sample , sample_2 , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
with torch.no_grad():
sample = model.decode(encoding ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(sample , sample_2 , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed )
generator = self.get_generator(seed )
with torch.no_grad():
dist = model.encode(image ).latent_dist
sample = dist.sample(generator=generator )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice )
tolerance = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
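# Note (added): the SD VAE encoder downsamples each spatial dimension by a
# factor of 8 into 4 latent channels, so a (N, 3, 512, 512) image yields a
# (N, 4, 64, 64) latent - the exact shape asserted above.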
| 56 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
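# A brief illustration (added, not part of the original module) of how the
# aliases read in practice:
def _first_item(items: ListLike[int]) -> int:
# accepts either a list or a tuple of ints
return items[0]
_example_path: PathLike = "data/train.txt"  # str, bytes and os.PathLike values all fit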
| 181 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
pipeline_class = ShapEPipeline
params = ["prompt"]
batch_params = ["prompt"]
required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
test_gpu_offload = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def renderer_dim(self):
return 8
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_prior(self):
torch.manual_seed(0 )
model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
model = PriorTransformer(**model_kwargs )
return model
@property
def dummy_renderer(self):
torch.manual_seed(0 )
model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
model = ShapERenderer(**model_kwargs )
return model
def get_dummy_components(self):
prior = self.dummy_prior
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
renderer = self.dummy_renderer
scheduler = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
components = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def test_shap_e(self):
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images[0]
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
expected_slice = np.array([0.00039216] * 9 )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def test_inference_batch_single_identical(self):
test_max_difference = torch_device == 'cpu'
relax_max_difference = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def test_num_images_per_prompt(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase ):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_shap_e(self):
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=torch_device ).manual_seed(0 )
images = pipe(
'a shark' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(images , expected_image )
| 56 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size if size is not None else {'height': 18, 'width': 20}
self.do_thumbnail = do_thumbnail
self.do_align_axis = do_align_axis
self.do_pad = do_pad
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
image_processing_class = DonutImageProcessor if is_vision_available() else None
def setUp(self ):
"""simple docstring"""
self.image_processor_tester = DonutImageProcessingTester(self )
@property
def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , 'do_resize' ) )
self.assertTrue(hasattr(image_processing , 'size' ) )
self.assertTrue(hasattr(image_processing , 'do_thumbnail' ) )
self.assertTrue(hasattr(image_processing , 'do_align_long_axis' ) )
self.assertTrue(hasattr(image_processing , 'do_pad' ) )
self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
self.assertTrue(hasattr(image_processing , 'image_mean' ) )
self.assertTrue(hasattr(image_processing , 'image_std' ) )
def test_image_processor_from_dict_with_kwargs(self ):
"""simple docstring"""
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
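# Note (added): a bare int size is expanded to a square {"height": n, "width": n},
# while a legacy (width, height) tuple is flipped into the height/width order
# used everywhere else in the processor - both conversions are checked above.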
def test_batch_feature(self ):
"""simple docstring"""
pass
@is_flaky()
def test_call_pil(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def test_call_numpy(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def test_call_pytorch(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 271 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition(number_to_partition: int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
ret: set[int] = set()
prime: int
sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
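# Worked example (added, not in the original solution): the prime partitions of
# 5 are {5} and {2, 3}, encoded by their unique products 5 and 2 * 3 = 6.
assert partition(5 ) == {5, 6}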
def solution(number_unique_partitions: int = 5000 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , NUM_PRIMES ):
if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str ) -> list[int]:
"""simple docstring"""
return [ord(elem ) - 96 for elem in plain]
def decode(encoded: list[int] ) -> str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
"""simple docstring"""
encoded = encode(input("-> " ).strip().lower() )
print("Encoded: " , encoded )
print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
main()
| 657 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
"""simple docstring"""
compute_environment = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
config = get_sagemaker_input()
else:
config = get_cluster_input()
return config
def config_command_parser(subparsers=None ):
"""simple docstring"""
if subparsers is not None:
parser = subparsers.add_parser('config' , description=description )
else:
parser = argparse.ArgumentParser('Accelerate config command' , description=description )
parser.add_argument(
'--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=config_command )
return parser
def config_command(args ):
"""simple docstring"""
config = get_user_input()
if args.config_file is not None:
config_file = args.config_file
else:
if not os.path.isdir(cache_dir ):
os.makedirs(cache_dir )
config_file = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(config_file )
else:
config.to_yaml_file(config_file )
print(f'accelerate configuration saved at {config_file}' )
def main():
"""simple docstring"""
parser = config_command_parser()
args = parser.parse_args()
config_command(args )
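# Typical invocation (added note): running `accelerate config` walks through the
# prompts interactively and writes the answers to the default cache location;
# `accelerate config --config_file path.yaml` writes them somewhere else instead.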
if __name__ == "__main__":
main()
| 56 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
lowercase_ : str = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowercase_ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__A = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=__lowercase , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
__A = field(default=__lowercase , metadata={'''help''': '''A folder containing the training data.'''} )
__A = field(default=__lowercase , metadata={'''help''': '''A folder containing the validation data.'''} )
__A = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
__A = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
__A = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __post_init__( self ):
'''simple docstring'''
data_files = {}
if self.train_dir is not None:
data_files['train'] = self.train_dir
if self.validation_dir is not None:
data_files['validation'] = self.validation_dir
self.data_files = data_files if data_files else None
@dataclass
class __UpperCamelCase :
__A = field(
default=__lowercase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
__A = field(
default=__lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase )} , )
__A = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__A = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(default=__lowercase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
__A = field(
default=__lowercase , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
__A = field(
default=__lowercase , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class MaskGenerator:
def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
'''simple docstring'''
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size**2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
'''simple docstring'''
mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
mask = np.zeros(self.token_count , dtype=int )
mask[mask_idx] = 1
mask = mask.reshape((self.rand_size, self.rand_size) )
mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
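# Worked example (added): with the defaults (input_size=192, mask_patch_size=32,
# model_patch_size=4, mask_ratio=0.6) we get rand_size = 6, scale = 8,
# token_count = 36 and mask_count = ceil(21.6) = 22, so the flattened mask has
# (6 * 8) ** 2 = 2304 entries, of which 22 * 64 = 1408 are set to 1.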
def collate_fn(examples ):
pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
mask = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
lowercase = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase = split["""train"""]
lowercase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase__ )
elif model_args.model_name_or_path:
lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase__ , """decoder_type""" ):
lowercase = """simmim"""
# adapt config
lowercase = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
lowercase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase = AutoModelForMaskedImageModeling.from_config(lowercase__ )
if training_args.do_train:
lowercase = ds["""train"""].column_names
else:
lowercase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
lowercase = data_args.image_column_name
elif "image" in column_names:
lowercase = """image"""
elif "img" in column_names:
lowercase = """img"""
else:
lowercase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(examples ):
examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
lowercase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
if __name__ == "__main__":
main()
| 588 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
"""simple docstring"""
if not isinstance(n , int ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
list_nums = []
for num in range(len(odd_composites ) ):
i = 0
while 2 * i * i <= odd_composites[num]:
rem = odd_composites[num] - 2 * i * i
if is_prime(rem ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(list_nums ) == n:
return list_nums
return []
def solution() -> int:
"""simple docstring"""
return compute_nums(1 )[0]
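# Sanity note (added): 5777 is the smallest odd composite that is not a prime
# plus twice a square (Goldbach's other conjecture, Project Euler 46), so
# compute_nums(1) should return [5777] and solution() should return 5777.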
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 640 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int , partitions: int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
bytes_per_partition = number_of_bytes // partitions
allocation_list = []
for i in range(partitions ):
start_bytes = i * bytes_per_partition + 1
end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
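# Worked example (added): allocation_num(100, 3) computes bytes_per_partition = 33
# and returns ['1-33', '34-66', '67-100'] - the last slice absorbs the remainder
# so every byte is covered exactly once.
assert allocation_num(100 , 3 ) == ["1-33", "34-66", "67-100"]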
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__UpperCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase = {
"unc-nlp/lxmert-base-uncased": 512,
}
__UpperCAmelCase = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LxmertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
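# Usage sketch (added; downloading the checkpoint requires network access):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   ids = tokenizer("a picture of a cat")["input_ids"]  # wrapped in [CLS] ... [SEP]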
| 406 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
def __init__( self , p_stop=0.01 , max_length=1000 ):
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self ):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
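# Note (added): each pass over this dataset yields 0, 1, 2, ... and stops with
# probability p_stop after every item (bounded by max_length), producing the
# random-length streams used later in this file to exercise IterableDatasetShard.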
class BatchSamplerShardTest(unittest.TestCase ):
def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
batch_sampler_shards = [
BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
for i in range(2 )
]
batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
self.assertListEqual(batch_sampler_lists , expected )
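# Illustration (added): with 2 processes and batch_size=3, BatchSamplerShard
# deals out whole batches round-robin - shard 0 gets batches 0, 2, 4, ... and
# shard 1 gets batches 1, 3, 5, ... - and, when even_batches=True, wraps back
# to the start of the dataset so both shards end with equally sized batches.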
def test_batch_sampler_shards_with_no_splits(self ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def test_batch_sampler_shards_with_splits(self ):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def test_batch_sampler_shards_with_no_splits_no_even(self ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and also does not have a
        # multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        # If drop_last is False, samples from the beginning are reused so every
        # shard reaches a length that is a round multiple of shard_batch_size.
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        # The four drop_last/split_batches combinations
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        # skip_first_batches is how Accelerate resumes a dataloader mid-epoch
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
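
# Illustrative sketch (not part of the test suite): how BatchSamplerShard
# interleaves batches across two processes. The expected lists mirror the
# patterns asserted above; names follow the public accelerate API.
#
#   from torch.utils.data import BatchSampler
#   from accelerate.data_loader import BatchSamplerShard
#
#   sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
#   shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#   list(shards[0])  # [[0, 1], [4, 5]]  (process 0 gets batches 0, 2, ...)
#   list(shards[1])  # [[2, 3], [6, 7]]  (process 1 gets batches 1, 3, ...)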
| 56 | 0 |
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string into camelCase (or PascalCase when requested).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 135 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')

        source_text = 'Tämä on testi'
        target_text = 'This is a test'

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
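
    # Illustrative sketch of the two-vocab round trip the test above exercises
    # (the checkpoint and the German sample are assumptions, not from this file):
    #   tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    #   batch = tok(["I am a small frog"], return_tensors="pt")             # source.spm
    #   labels = tok(text_target=["Ich bin ein kleiner Frosch"]).input_ids  # target.spm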
| 56 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase_ : Dict = StableUnCLIPPipeline
lowercase_ : Tuple = TEXT_TO_IMAGE_PARAMS
lowercase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase_ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowercase_ : Optional[int] = False
def UpperCAmelCase__ ( self : Tuple ) -> Any:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase :Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase :List[Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=SCREAMING_SNAKE_CASE_ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase :Tuple = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=SCREAMING_SNAKE_CASE_ , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase :Union[str, Any] = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase :Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :Optional[Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowerCAmelCase :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase :List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=SCREAMING_SNAKE_CASE_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase :int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=SCREAMING_SNAKE_CASE_ , layers_per_block=1 , upcast_attention=SCREAMING_SNAKE_CASE_ , use_linear_projection=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
lowerCAmelCase :Tuple = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase :Dict = AutoencoderKL()
lowerCAmelCase :Any = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def UpperCAmelCase__ ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any]=0 ) -> Optional[Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
lowerCAmelCase :Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase :Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
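
    # Sketch of how the tester mixin consumes the two helpers above (method and
    # attribute names are assumed from the mixin, not defined in this snippet):
    #   pipe = self.pipeline_class(**self.get_dummy_components()).to("cpu")
    #   image = pipe(**self.get_dummy_inputs("cpu")).images[0]  # (32, 32, 3) for sample_size=32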
def UpperCAmelCase__ ( self : int ) -> str:
lowerCAmelCase :List[str] = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase :List[Any] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
lowerCAmelCase :Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
lowerCAmelCase :List[str] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase :Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
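        # NOTE: the prompt "anime turle" (sic) is kept verbatim: the reference
        # image loaded above was presumably generated with this exact string,
        # so correcting the typo would change the image being compared.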
lowerCAmelCase :Optional[int] = pipe('anime turle' , generator=SCREAMING_SNAKE_CASE_ , output_type='np' )
lowerCAmelCase :int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase :Union[str, Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
lowerCAmelCase :Dict = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase :str = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase :Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 553 |
"""
Pure-Python implementation of the MD5 message-digest algorithm (RFC 1321),
working on explicit bit strings for clarity rather than speed.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Turn the message into a bit string padded to a multiple of 512 bits."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on 32 bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """
    Return the 32-character hexadecimal MD5 digest of `message`.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5_me(b"The quick brown fox jumps over the lazy dog")
    b'9e107d9d372bb6826bd81d3542a419d6'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
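
# Illustrative sanity check (run manually): the digest should agree with hashlib.
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")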
| 56 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
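
# Recommended replacement per the warning above (the checkpoint name is only an
# example):
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")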
| 6 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the resolve URL of a dataset file hosted on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
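
# Example (illustrative; assumes the hfh default revision "main"):
#   hf_hub_url("user/my_dataset", "data/train.json")
#   -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.json"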
| 56 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Return the indices of the two numbers in the *sorted* list `nums` that add
    up to `target`, using an O(n) two-pointer scan; returns [] if no pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f'{two_pointer([2, 7, 11, 15], 9) = }')
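
# Note: the two-pointer scan assumes `nums` is sorted in ascending order; for
# unsorted input, a single hash-map pass (value -> index) is the usual O(n)
# alternative.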
| 312 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return one representative linear layer of `model` (architecture-dependent)."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear module with a small trainable low-rank adapter (LoRA-style)."""

        def __init__(self, module: nn.Module, rank: int) -> None:
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
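
# Illustrative use of the adapter wrapper above (a sketch, assuming torch is
# available): the frozen base layer keeps its weights while the two small
# adapter matrices receive all the gradients.
#   base = nn.Linear(16, 16)
#   base.weight.requires_grad_(False)
#   wrapped = LoRALayer(base, rank=4)  # forward(x) = base(x) + adapter(x)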
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
    def test_pipeline(self):
        self.pipe = pipeline(
            'text-generation',
            model=self.model_name,
            model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
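
# The pattern tested above (frozen 4-bit base weights plus small fp32 adapters
# that receive all the gradients) is essentially QLoRA-style fine-tuning: the
# quantized weights are never updated, only the adapter parameters train.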
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( __lowercase ):
_a = ["vqvae"]
def __init__( self : List[str] , A_ : AutoencoderKL , A_ : UNetaDConditionModel , A_ : Mel , A_ : Union[DDIMScheduler, DDPMScheduler] , ):
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , mel=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_)
def UpperCAmelCase__ ( self : Optional[Any]):
return 5_0 if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Optional[int] , A_ : int = 1 , A_ : str = None , A_ : np.ndarray = None , A_ : int = 0 , A_ : int = 0 , A_ : int = None , A_ : torch.Generator = None , A_ : float = 0 , A_ : float = 0 , A_ : torch.Generator = None , A_ : float = 0 , A_ : torch.Tensor = None , A_ : torch.Tensor = None , A_ : Optional[Any]=True , ):
lowerCAmelCase_ : int = steps or self.get_default_steps()
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
lowerCAmelCase_ : List[str] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=SCREAMING_SNAKE_CASE_ , device=self.device , )
lowerCAmelCase_ : Any = noise
lowerCAmelCase_ : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Any = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : List[Any] = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
lowerCAmelCase_ : Optional[Any] = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ : Dict = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
lowerCAmelCase_ : List[Any] = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE_ , 0)).latent_dist.sample(
generator=SCREAMING_SNAKE_CASE_)[0]
lowerCAmelCase_ : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ : Any = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.scheduler.timesteps[start_step - 1])
lowerCAmelCase_ : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ : List[Any] = int(mask_start_secs * pixels_per_second)
lowerCAmelCase_ : str = int(mask_end_secs * pixels_per_second)
lowerCAmelCase_ : List[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , SCREAMING_SNAKE_CASE_):
lowerCAmelCase_ : Dict = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)['''sample''']
else:
lowerCAmelCase_ : Union[str, Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)['''sample''']
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_):
lowerCAmelCase_ : str = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )['''prev_sample''']
else:
lowerCAmelCase_ : Dict = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ : Dict = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB').convert('L') for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
def UpperCAmelCase__ ( self : List[str] , A_ : List[Image.Image] , A_ : int = 5_0):
assert isinstance(self.scheduler , SCREAMING_SNAKE_CASE_)
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_)
lowerCAmelCase_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
lowerCAmelCase_ : Any = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ : Optional[Any] = torch.Tensor(SCREAMING_SNAKE_CASE_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
lowerCAmelCase_ : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ : int = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ : int = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ : List[str] = 1 - alpha_prod_t
lowerCAmelCase_ : Optional[int] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)['''sample''']
lowerCAmelCase_ : Optional[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ : Optional[int] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ : Any = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two (flattened) tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
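
    # In math terms: slerp(x0, x1, a) = sin((1 - a) * theta) / sin(theta) * x0
    #                                 + sin(a * theta) / sin(theta) * x1,
    # with theta = arccos(<x0, x1> / (|x0| * |x1|)); this interpolates noise
    # tensors along a great circle rather than a straight line.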
| 171 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 0 |
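Aside (my addition, not part of the dataset): the fixture above pairs a toy 16-token vocabulary with a hub-hosted beam-search decoder. A comparable decoder can be built locally with pyctcdecode's `build_ctcdecoder` helper; the random logits below are stand-ins, mirroring the dummy logits the tests feed to `decode_beams`.

# Hedged sketch: a local CTC beam-search decoder over the toy vocabulary used in
# the fixture above. `build_ctcdecoder` is pyctcdecode's public entry point; no
# kenlm language model is attached, and the logits are random placeholders.
import numpy as np
from pyctcdecode import build_ctcdecoder

labels = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
decoder = build_ctcdecoder(labels)
logits = np.random.rand(10, len(labels)).astype(np.float32)  # (time, vocab)
print(decoder.decode(logits))  # best beam over the 10 dummy time steps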
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase ='| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
_lowercase =dict(zip(SCREAMING_SNAKE_CASE_, range(len(SCREAMING_SNAKE_CASE_))))
_lowercase ={
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
_lowercase ={
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
_lowercase =tempfile.mkdtemp()
_lowercase =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
_lowercase =os.path.join(self.tmpdirname, SCREAMING_SNAKE_CASE_)
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + '\n')
with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + '\n')
# load decoder from hub
_lowercase ='hf-internal-testing/ngram-beam-search-decoder'
def UpperCamelCase__ ( self :Optional[int], **snake_case :Tuple):
"""simple docstring"""
_lowercase =self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_)
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Optional[Any], **snake_case :Any):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Union[str, Any], **snake_case :List[Any]):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase =self.get_tokenizer()
_lowercase =self.get_feature_extractor()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
processor.save_pretrained(self.tmpdirname)
_lowercase =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, SCREAMING_SNAKE_CASE_)
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, SCREAMING_SNAKE_CASE_)
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
processor.save_pretrained(self.tmpdirname)
# make sure that error is thrown when decoder alphabet doesn't match
_lowercase =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)
# decoder
self.assertEqual(processor.language_model.alpha, 5.0)
self.assertEqual(processor.language_model.beta, 3.0)
self.assertEqual(processor.language_model.score_boundary, -7.0)
self.assertEqual(processor.language_model.unk_score_offset, 3)
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'])
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_, 'include'):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase =floats_list((3, 1000))
_lowercase =feature_extractor(SCREAMING_SNAKE_CASE_, return_tensors='np')
_lowercase =processor(SCREAMING_SNAKE_CASE_, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase ='This is a test string'
_lowercase =processor(text=SCREAMING_SNAKE_CASE_)
_lowercase =tokenizer(SCREAMING_SNAKE_CASE_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def UpperCamelCase__ ( self :Any, snake_case :Union[str, Any]=(2, 10, 16), snake_case :Dict=77):
"""simple docstring"""
np.random.seed(SCREAMING_SNAKE_CASE_)
return np.random.rand(*SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase =self._get_dummy_logits(shape=(10, 16), seed=13)
_lowercase =processor.decode(SCREAMING_SNAKE_CASE_)
_lowercase =decoder.decode_beams(SCREAMING_SNAKE_CASE_)[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text)
self.assertEqual('</s> <s> </s>', decoded_processor.text)
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']])
def UpperCamelCase__ ( self :Optional[Any], snake_case :List[str]):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowercase =processor.batch_decode(SCREAMING_SNAKE_CASE_)
else:
with get_context(SCREAMING_SNAKE_CASE_).Pool() as pool:
_lowercase =processor.batch_decode(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
_lowercase =list(SCREAMING_SNAKE_CASE_)
with get_context('fork').Pool() as p:
_lowercase =decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
_lowercase , _lowercase , _lowercase =[], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0])
logit_scores_decoder.append(beams[0][-2])
lm_scores_decoder.append(beams[0][-1])
self.assertListEqual(SCREAMING_SNAKE_CASE_, decoded_processor.text)
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'], decoded_processor.text)
self.assertListEqual(SCREAMING_SNAKE_CASE_, decoded_processor.logit_score)
self.assertListEqual(SCREAMING_SNAKE_CASE_, decoded_processor.lm_score)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase =self._get_dummy_logits()
_lowercase =15
_lowercase =-20.0
_lowercase =-4.0
_lowercase =processor.batch_decode(
SCREAMING_SNAKE_CASE_, beam_width=SCREAMING_SNAKE_CASE_, beam_prune_logp=SCREAMING_SNAKE_CASE_, token_min_logp=SCREAMING_SNAKE_CASE_, )
_lowercase =decoded_processor_out.text
_lowercase =list(SCREAMING_SNAKE_CASE_)
with get_context('fork').Pool() as pool:
_lowercase =decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, beam_width=SCREAMING_SNAKE_CASE_, beam_prune_logp=SCREAMING_SNAKE_CASE_, token_min_logp=SCREAMING_SNAKE_CASE_, )
_lowercase =[d[0][0] for d in decoded_decoder_out]
_lowercase =[d[0][2] for d in decoded_decoder_out]
_lowercase =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'], SCREAMING_SNAKE_CASE_)
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, decoded_processor_out.logit_score))
self.assertTrue(np.allclose([-20.054, -18.447], SCREAMING_SNAKE_CASE_, atol=1e-3))
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, decoded_processor_out.lm_score))
self.assertTrue(np.allclose([-15.554, -13.9474], SCREAMING_SNAKE_CASE_, atol=1e-3))
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
_lowercase =self._get_dummy_logits()
_lowercase =2.0
_lowercase =5.0
_lowercase =-20.0
_lowercase =True
_lowercase =processor.batch_decode(
SCREAMING_SNAKE_CASE_, alpha=SCREAMING_SNAKE_CASE_, beta=SCREAMING_SNAKE_CASE_, unk_score_offset=SCREAMING_SNAKE_CASE_, lm_score_boundary=SCREAMING_SNAKE_CASE_, )
_lowercase =decoded_processor_out.text
_lowercase =list(SCREAMING_SNAKE_CASE_)
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_, beta=SCREAMING_SNAKE_CASE_, unk_score_offset=SCREAMING_SNAKE_CASE_, lm_score_boundary=SCREAMING_SNAKE_CASE_, )
with get_context('fork').Pool() as pool:
_lowercase =decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, )
_lowercase =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'], SCREAMING_SNAKE_CASE_)
_lowercase =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0)
self.assertEqual(lm_model.beta, 5.0)
self.assertEqual(lm_model.unk_score_offset, -20.0)
self.assertEqual(lm_model.score_boundary, SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_lowercase =processor.decoder.model_container[processor.decoder._model_key]
_lowercase =Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
_lowercase =os.listdir(SCREAMING_SNAKE_CASE_)
_lowercase =['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =snapshot_download('hf-internal-testing/processor_with_lm')
_lowercase =WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_)
_lowercase =processor.decoder.model_container[processor.decoder._model_key]
_lowercase =Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
_lowercase =os.listdir(SCREAMING_SNAKE_CASE_)
_lowercase =os.listdir(SCREAMING_SNAKE_CASE_)
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_lowercase =AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')
_lowercase =floats_list((3, 1000))
_lowercase =processor_wavaveca(SCREAMING_SNAKE_CASE_, return_tensors='np')
_lowercase =processor_auto(SCREAMING_SNAKE_CASE_, return_tensors='np')
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)
_lowercase =self._get_dummy_logits()
_lowercase =processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_)
_lowercase =processor_auto.batch_decode(SCREAMING_SNAKE_CASE_)
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =self.get_feature_extractor()
_lowercase =self.get_tokenizer()
_lowercase =self.get_decoder()
_lowercase =WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_, feature_extractor=SCREAMING_SNAKE_CASE_, decoder=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
@staticmethod
def UpperCamelCase__ ( snake_case :Optional[int], snake_case :Optional[int]):
"""simple docstring"""
_lowercase =[d[key] for d in offsets]
return retrieved_list
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_lowercase =self._get_dummy_logits()[0]
_lowercase =processor.decode(SCREAMING_SNAKE_CASE_, output_word_offsets=SCREAMING_SNAKE_CASE_)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()), 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_))
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'], 'word')), outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'word'), ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'start_offset'), [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'end_offset'), [1, 3, 5])
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
_lowercase =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
_lowercase =self._get_dummy_logits()
_lowercase =processor.batch_decode(SCREAMING_SNAKE_CASE_, output_word_offsets=SCREAMING_SNAKE_CASE_)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()), 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_))
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_, 'word')) for o in outputs['word_offsets']], outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'word'), ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'start_offset'), [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'end_offset'), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
import torch
_lowercase =load_dataset('common_voice', 'en', split='train', streaming=SCREAMING_SNAKE_CASE_)
_lowercase =ds.cast_column('audio', datasets.Audio(sampling_rate=1_6000))
_lowercase =iter(SCREAMING_SNAKE_CASE_)
_lowercase =next(SCREAMING_SNAKE_CASE_)
_lowercase =AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
_lowercase =WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowercase =processor(sample['audio']['array'], return_tensors='pt').input_values
with torch.no_grad():
_lowercase =model(SCREAMING_SNAKE_CASE_).logits.cpu().numpy()
_lowercase =processor.decode(logits[0], output_word_offsets=SCREAMING_SNAKE_CASE_)
_lowercase =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowercase =[
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_lowercase ='WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_, 'word')), SCREAMING_SNAKE_CASE_)
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_, 'word')), output.text)
# output times
_lowercase =torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_, 'start_time'))
_lowercase =torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_, 'end_time'))
# fmt: off
_lowercase =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9])
_lowercase =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4])
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=0.0_1))
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=0.0_1))
| 181 |
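Usage sketch (hedged, my addition): the parameterized test above exercises pool-based batch decoding. The checkpoint id comes from the tests themselves; running this needs network access plus pyctcdecode, and the pool must be created after the processor so worker processes inherit the language model.

# Pool-based decoding pattern from the test above; all values are illustrative.
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16).astype(np.float32)  # (batch, time, vocab), mirroring the dummy logits
# instantiate the pool *after* the processor so the LM is visible to sub-processes
with get_context("fork").Pool() as pool:
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)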
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    """simple docstring"""
    return base * power(base, exponent - 1) if exponent else 1

if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 56 | 0 |
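Aside (my addition, not from the snippet above): the recursion above takes O(n) multiplications; exponentiation by squaring does the same job in O(log n).

# Hedged sketch: iterative exponentiation by squaring, for contrast with the
# linear recursion above. `fast_power` is an illustrative name of my own.
def fast_power(base: float, exponent: int) -> float:
    result = 1.0
    while exponent:
        if exponent & 1:  # fold in the current bit's contribution
            result *= base
        base *= base
        exponent >>= 1
    return result

assert fast_power(2, 10) == 1024.0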
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 271 |
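Usage sketch (hedged, my addition): composing the full AltCLIP config from its two sub-configs via the `from_text_vision_configs` classmethod defined above. The small sizes are arbitrary illustrative values, not the model's real hyperparameters.

# Build sub-configs, then the composite config; prints the values we passed in.
text_cfg = AltCLIPTextConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
vision_cfg = AltCLIPVisionConfig(hidden_size=64, intermediate_size=128, num_hidden_layers=2, num_attention_heads=2, image_size=32, patch_size=8)
config = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=64)
print(config.projection_dim, config.text_config.vocab_size)  # 64 1000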
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], xa: float, xb: float) -> float:
    """simple docstring"""
    x_n = xa
    x_n1 = xb
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2

def f(x: float) -> float:
    """simple docstring"""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 0 |
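Sanity check (hedged, my addition): the real root of x**3 - 2*x - 5 is about 2.0946, so the secant iteration above, started from 3 and 3.5, should land within its 1e-5 stopping tolerance of that value.

# Quick verification against the known root; names refer to the snippet above.
root = intersection(f, 3, 3.5)
assert abs(f(root)) < 1e-3
print(f"root ~= {root:.4f}")  # ~= 2.0946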
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}")
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 657 |
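Aside (hedged, my addition, not part of the converter): the verification step above relies on LUKE's entity-span encoding. The same pattern can be tried against the public studio-ousia/luke-base checkpoint rather than a freshly converted one; the sentence and span below are illustrative.

# Entity-span encoding with LUKE; requires network access to fetch the checkpoint.
import torch
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
text = "Beyoncé lives in Los Angeles."
inputs = tokenizer(text, entity_spans=[(0, 7)], return_tensors="pt")  # span covers "Beyoncé"
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape, outputs.entity_last_hidden_state.shape)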
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
def setUp( self ) -> None:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def test_pre_tokenization( self ) -> None:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
return 8
@property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
lowercase = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
lowercase = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.dummy_prior
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_renderer
lowercase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
lowercase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
lowercase = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowercase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowercase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
lowercase = output.images[0]
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = torch_device == """cpu"""
lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowercase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase = 1
lowercase = 2
lowercase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase = batch_size * [inputs[key]]
lowercase = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
lowercase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
lowercase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
lowercase = pipe(
"""a shark""" , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 588 |
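End-to-end sketch (hedged, my addition) of the slow test above, for readers who want to run the pipeline directly. The checkpoint id and call arguments are exactly the ones the test uses; a CUDA device and network access are assumed.

# Generate a 3D asset's rendered frames with Shap-E; mirrors the slow test.
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
images = pipe(
    "a shark",
    generator=generator,
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images
print(images[0].shape)  # the test asserts (20, 64, 64, 3)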
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)  # truncate toward zero for mixed signs
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
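Worked example (my addition): "(5 + 3) * 2" in postfix notation is 5 3 + 2 *, and the sign handling above makes integer division truncate toward zero.

# Names refer to the snippet above; both checks follow directly from its logic.
assert evaluate_postfix(["5", "3", "+", "2", "*"]) == 16
assert evaluate_postfix(["7", "-2", "/"]) == -3  # 7 / -2 truncated toward zero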
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
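

# Added illustration (not part of the original tests): the pipeline one-liner
# these tests exercise, run against the same tiny checkpoint as the fast test
# above with a dummy one-second waveform.
if __name__ == "__main__":
    demo_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(demo_classifier(np.ones((8000,)), top_k=2))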
| 640 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Plain recursive top-down approach: explores every cell and tracks the
    largest square of 1s seen so far in ``largest_square_area[0]``.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memo(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Memoized version of the top-down approach.

    >>> largest_square_area_in_matrix_top_down_with_memo(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up dynamic programming over a (rows + 1) x (cols + 1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up variant that keeps only the current row and the row below it.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # copy, so that overwriting current_row on the next iteration
        # does not clobber the values we still need to read
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
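    # Added cross-check (illustrative): all four variants should agree on the
    # same matrix; each returns the side length of the largest square of 1s.
    _mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    _results = {
        largest_square_area_in_matrix_top_down(3, 3, _mat),
        largest_square_area_in_matrix_top_down_with_memo(3, 3, _mat),
        largest_square_area_in_matrix_bottom_up(3, 3, _mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _mat),
    }
    assert _results == {2}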
| 56 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
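

# Added usage sketch (illustrative, not part of the original module): loading
# the fast tokenizer from the Hub and encoding a sentence pair.
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("Hello world", "How are you?")
    print(enc["input_ids"])
    print(tok.convert_ids_to_tokens(enc["input_ids"]))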
| 406 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
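

# Added illustration (not part of the original conftest): how a test module
# consumes these session fixtures. pytest injects fixtures by argument name,
# so a test only has to name the fixture it needs (assuming the fixtures keep
# their conventional upstream names, e.g. `csv_path`):
#
#   def test_csv_has_header(csv_path):
#       with open(csv_path, newline="") as f:
#           assert f.readline().strip() == "col_1,col_2,col_3"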
| 56 | 0 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
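
    # Added worked example (illustrative): the classic healthy/sick HMM.
    # With these tables, ["normal", "cold", "dizzy"] decodes to
    # ["healthy", "healthy", "sick"].
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))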
| 135 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
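

# Added usage sketch (illustrative): instantiating a default configuration.
if __name__ == "__main__":
    config = CamembertConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # camembert 768 12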
| 56 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __UpperCamelCase :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : str=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Optional[int]=50 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[int]=None , ) -> Dict:
lowerCAmelCase :Any = parent
lowerCAmelCase :List[Any] = batch_size
lowerCAmelCase :Union[str, Any] = seq_length
lowerCAmelCase :Union[str, Any] = is_training
lowerCAmelCase :Union[str, Any] = use_input_mask
lowerCAmelCase :Optional[int] = vocab_size
lowerCAmelCase :List[str] = hidden_size
lowerCAmelCase :Tuple = num_hidden_layers
lowerCAmelCase :List[str] = num_attention_heads
lowerCAmelCase :Dict = intermediate_size
lowerCAmelCase :int = hidden_act
lowerCAmelCase :Tuple = hidden_dropout_prob
lowerCAmelCase :int = attention_probs_dropout_prob
lowerCAmelCase :Dict = max_position_embeddings
lowerCAmelCase :Tuple = initializer_range
lowerCAmelCase :Optional[Any] = use_labels
lowerCAmelCase :Optional[int] = scope
def UpperCAmelCase__ ( self : str ) -> Tuple:
lowerCAmelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase :List[str] = None
if self.use_input_mask:
lowerCAmelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCAmelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int , ) -> Optional[Any]:
lowerCAmelCase :Dict = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase :Tuple = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict , ) -> List[Any]:
lowerCAmelCase :Any = True
lowerCAmelCase :Optional[int] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase :Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase :Union[str, Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
lowerCAmelCase :str = True
lowerCAmelCase :Union[str, Any] = True
lowerCAmelCase :Optional[Any] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
# first forward pass
lowerCAmelCase :List[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase :int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase :str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase :str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase :Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase :Dict = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase :Union[str, Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['hidden_states'][0]
lowerCAmelCase :Dict = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['hidden_states'][0]
# select random slice
lowerCAmelCase :str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase :List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase :str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def UpperCAmelCase__ ( self : int , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Any , *UpperCAmelCase : List[Any] , ) -> Any:
lowerCAmelCase :int = BertGenerationDecoder(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase_ : Optional[int] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase_ : Dict = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase_ : str = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
lowerCAmelCase :Dict = BertGenerationEncoderTester(self )
lowerCAmelCase :List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any] ) -> int:
lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
lowerCAmelCase :List[str] = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase :List[str] = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowerCAmelCase :Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowerCAmelCase :List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase :List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :Optional[Any] = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
lowerCAmelCase :str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowerCAmelCase :Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
lowerCAmelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase :List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :List[str] = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
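

# Added usage sketch (illustrative): the encoder checkpoint exercised by the
# integration tests above, outside of unittest.
if __name__ == "__main__":
    demo_model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    demo_input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
    with torch.no_grad():
        print(demo_model(demo_input_ids)[0].shape)  # torch.Size([1, 8, 1024])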
| 553 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
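

# Added usage sketch (illustrative): a default TimeSformer configuration.
if __name__ == "__main__":
    config = TimesformerConfig()
    print(config.num_frames, config.attention_type)  # 8 divided_space_time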
| 56 | 0 |
def longest_common_subsequence(x: str, y: str):
    """
    Finds the length of the longest common subsequence between two strings,
    and one such subsequence, using bottom-up dynamic programming.

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the table to recover one longest common subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
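    # Added check (illustrative): the recovered subsequence should match the
    # expected values defined above.
    assert ln == expected_ln
    assert subseq == expected_subseq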
| 6 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the nodes that carry the two given values
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            # swap the data fields of the two nodes
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
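
    # Worked example (illustrative numbers, not part of the original script): with
    # config["batch_size"] = 64 and MAX_GPU_BATCH_SIZE = 16, this yields
    # gradient_accumulation_steps = 4 and a per-step batch size of 16, so the
    # effective batch size is still 4 * 16 = 64.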
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
main()
| 312 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
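
# Note: with `_LazyModule`, the heavy torch/TF submodules listed above are only imported
# when one of their attributes is first accessed, not when this package is imported.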
| 56 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
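
    # Example invocation (illustrative paths, not part of the original script):
    #   python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt \
    #       --pytorch_dump_folder_path ./opt-converted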
| 171 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

@property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 56 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
_lowercase =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding']))
_lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding']) , requires_grad=lowercase__)
for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale']))
        attention_weights = ly_weight["attention"]
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
return model
def load_continuous_encoder(weights, model):
_lowercase =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T))
_lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding']) , requires_grad=lowercase__)
for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale']))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
_lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
return model
def load_decoder(weights, model):
_lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T))
_lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding']) , requires_grad=lowercase__)
_lowercase =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T))
for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale']))
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T))
        attention_weights = ly_weight["self_attention"]
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale']))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
_lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
_lowercase =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale']))
_lowercase =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T))
return model
def main(args):
    t5x_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5x_checkpoint = jnp.tree_util.tree_map(onp.array, t5x_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5x_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5x_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5x_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
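
    # Example invocation (illustrative paths, not part of the original script):
    #   python convert_music_spectrogram_to_diffusers.py \
    #       --checkpoint_path base_with_context/checkpoint_500000 --output_path ./spectrogram-diffusion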
| 181 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 56 | 0 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (the height depends on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with every
    character of the input string, then reads it off in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
import doctest
doctest.testmod()
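
    # Minimal round-trip check (illustrative values, not part of the original module):
    message = "WEAREDISCOVEREDFLEEATONCE"
    assert decrypt(encrypt(message, 4), 4) == message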
| 271 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return a set of integers corresponding to unique prime partitions of n.
    Each prime partition is represented by the product of its primes,
    e.g. (7+3) <-> 7*3 = 21, (3+2+2) <-> 3*2*2 = 12."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes in
    more than number_unique_partitions unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
print(f'''{solution() = }''')
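
    # Hand-checked small case (illustrative, not part of the original module): the prime
    # partitions of 7 are 7, 5+2 and 3+2+2, whose products are {7, 10, 12}.
    assert partition(7) == {7, 10, 12}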
| 56 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
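

# Minimal usage sketch (illustrative, not part of the original module):
#
#     config = VivitConfig(num_frames=16)
#     assert config.tubelet_size == [2, 16, 16]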
| 657 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
main()
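
# Typical invocations (illustrative, not part of the original module):
#   accelerate config                        # interactive prompts, saved to the default location
#   accelerate config --config_file my.yaml  # write the answers to an explicit path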
| 56 | 0 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler distance between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
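
    # Hand-checked value (illustrative, not part of the original module): the classic
    # "martha"/"marhta" pair has m=6 matches, t=1 transposition and a common prefix of 3,
    # giving jaro = (1 + 1 + 5/6) / 3 ~= 0.9444 and jaro-winkler ~= 0.9611.
    assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611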
| 588 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which do
    not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the first odd composite that violates the conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a : Optional[Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 640 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a number of bytes into a given number of partitions."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
import doctest
doctest.testmod()
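
    # Worked example (hand-checked, not part of the original module): 16647 bytes over 4
    # partitions gives 4161-byte ranges, with the remainder folded into the last one.
    assert allocation_num(16_647, 4) == ["1-4161", "4162-8322", "8323-12483", "12484-16647"]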
| 56 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Returns the sum of Euler's totient function over 2..limit, i.e. the
    number of reduced proper fractions with denominator <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
print(F'{solution() = }')
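
    # Hand-checked small case (illustrative, not part of the original module):
    # solution(8) counts sum(phi(d) for d in 2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.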
| 406 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self : Optional[int] , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ) -> List[Any]:
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        reference_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(reference_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(reference_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def a ( self : Dict ) -> Tuple:
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
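# Minimal resume sketch (an addition for illustration, mirroring the tests above;
# the top-level import path for skip_first_batches is an assumption about current
# accelerate releases):
#
#   from torch.utils.data import DataLoader
#   from accelerate import skip_first_batches
#
#   loader = DataLoader(list(range(16)), batch_size=4)
#   resumed = skip_first_batches(loader, num_batches=2)
#   # iterating `resumed` yields tensors [8, 9, 10, 11] then [12, 13, 14, 15]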
| 56 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] ={
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
__SCREAMING_SNAKE_CASE : Dict ={
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
__SCREAMING_SNAKE_CASE : Dict ={
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def get_pairs ( word : List[Any] ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
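# Example (added): get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}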
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
"""simple docstring"""
A__ : Dict = VOCAB_FILES_NAMES
A__ : int = PRETRAINED_VOCAB_FILES_MAP
A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[str]:
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
A: List[str] = vocab_file
A: int = merges_file
A: Union[str, Any] = {}
A: Dict = 0
A: Optional[int] = 1
A: Union[str, Any] = 2
A: List[str] = 3
self.add_from_file(SCREAMING_SNAKE_CASE_ )
A: Optional[int] = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as merges_handle:
A: Optional[int] = merges_handle.read().split("""\n""" )[:-1]
A: Dict = [tuple(merge.split()[:-1] ) for merge in merges]
A: Dict = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: List[str] = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def a__ ( self ) -> Any:
return len(self.encoder )
def a__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> List[Any]:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """@@ """.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
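    # Added walkthrough (illustration only): for token "lower" with a merge table that
    # ranks ("l", "o") first, one pass rewrites ("l", "o", "w", "e", "r</w>") into
    # ("lo", "w", "e", "r</w>"); the loop then re-scores pairs and keeps merging until
    # no remaining pair appears in bpe_ranks, after which the pieces are joined with
    # "@@ " continuation markers and the trailing "</w>" is stripped.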
    def _tokenize( self , text ) -> Optional[Any]:
        split_tokens = []
        words = re.findall(r"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ) -> Optional[Any]:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> List[Any]:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> Optional[int]:
        out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ) -> List[Any]:
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(""" """ )
            if idx == -1:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
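    # Hypothetical usage sketch (added; the concrete class name and file paths are
    # assumptions — this is PhoBERT's BPE tokenizer, whose published files are
    # vocab.txt and bpe.codes per VOCAB_FILES_NAMES above):
    #
    #   tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
    #   tokenizer.tokenize("some input text")  # -> BPE pieces such as ["so@@", "me", ...]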
| 135 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
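    # Added note: Marian checkpoints with separate source/target SentencePiece models,
    # like the two-vocab checkpoint above, encode the same surface string to different
    # ids depending on whether it is passed as `text` or as `text_target`.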
| 56 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance ( vector_1 , vector_2 ):
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np ( vector_1 , vector_2 ):
    '''simple docstring'''
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)
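# Example (added): euclidean_distance([0, 0], [3, 4]) == 5.0 for both variants,
# since sqrt((0 - 3)**2 + (0 - 4)**2) == sqrt(25).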
if __name__ == "__main__":
    def benchmark ( ):
        '''simple docstring'''
        from timeit import timeit
        print('Without Numpy' )
        print(
            timeit(
                'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
        print('With Numpy' )
        print(
            timeit(
                'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
    benchmark()
| 553 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = B''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = B''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = B''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string until its length is 448 (mod 512), then append the original
    # length as a 64-bit value so the total is a multiple of 512 bits
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words (bit_string : bytes ) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_1_2 ):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa (i : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
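# Example (added): not_aa(34) == 4_294_967_261, i.e. the bitwise complement of
# 0b100010 within 32 bits (2**32 - 1 - 34).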
def sum_aa (a : int , b : int ) -> int:
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa (i : int , shift : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
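# Example (added): left_rotate_aa(1, 31) == 2**31, and left_rotate_aa(2**31, 1) == 1
# since the high bit wraps around to position 0.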
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = preprocess(lowercase__ )
__snake_case = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
__snake_case = 0x6_7_4_5_2_3_0_1
__snake_case = 0xE_F_C_D_A_B_8_9
__snake_case = 0x9_8_B_A_D_C_F_E
__snake_case = 0x1_0_3_2_5_4_7_6
    __snake_case = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowercase__ ):
__snake_case = aa
__snake_case = ba
__snake_case = ca
__snake_case = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case = d ^ (b & (c ^ d))
__snake_case = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case = c ^ (d & (b ^ c))
__snake_case = (5 * i + 1) % 1_6
elif i <= 4_7:
__snake_case = b ^ c ^ d
__snake_case = (3 * i + 5) % 1_6
else:
__snake_case = c ^ (b | not_aa(lowercase__ ))
__snake_case = (7 * i) % 1_6
__snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__snake_case = d
__snake_case = c
__snake_case = b
__snake_case = sum_aa(lowercase__ , left_rotate_aa(lowercase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ )
return digest
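# Sanity check sketch (added): the function above is the MD5 digest entry point
# (its original name is lost to the `_a` renaming in this dump); for the empty
# message it should return the well-known reference digest:
#
#   digest_fn(b"") == b"d41d8cd98f00b204e9800998ecf8427e"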
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
import math
import unittest
def is_prime ( number: int ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
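# Example (added): 25 == 6 * 4 + 1 has the 6k +/- 1 form but is rejected by the
# trial division at i == 5, so the form is necessary for primes > 3, not sufficient.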
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _snake_case ( self :Tuple ) -> Dict:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn\'t have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 6 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url (repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
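# Usage sketch (added; repo and file names are placeholders):
#
#   url = hf_hub_url("username/my_dataset", "data/train.csv", revision="main")
#   # -> https://huggingface.co/datasets/username/my_dataset/resolve/main/data/train.csv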
| 56 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[str] = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
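# Added note: with the _LazyModule indirection above, importing this package does not
# eagerly pull in the torch-backed modeling files; a symbol such as
# CLIPSegForImageSegmentation is only resolved (and torch imported) on first attribute
# access, while the TYPE_CHECKING branch keeps static analyzers aware of the names.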
| 312 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer (model : Optional[Any] ) -> List[str]:
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
    def __init__( self : Dict , module : nn.Module , rank : int ) -> str:
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self : Optional[Any] , input : Any , *args : List[Any] , **kwargs : List[str] ) -> Union[str, Any]:
        return self.module(input , *args , **kwargs ) + self.adapter(input )
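    # In effect (added note): forward computes module(x) + B(A(x)), where A is the first
    # adapter Linear (small normal init) and B the second (zero init), so the adapter
    # contributes nothing at initialization and learns a low-rank update during training.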
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
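    # Minimal load pattern mirrored by this test (added sketch; the model name is a
    # placeholder taken from the class constant above):
    #
    #   cfg = BitsAndBytesConfig()
    #   cfg.load_in_4bit = True
    #   model = AutoModelForCausalLM.from_pretrained(
    #       "bigscience/bloom-1b7", quantization_config=cfg, device_map="auto")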
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries casting with `float()`
            self.model_abit.float()
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries casting with `half()`
            self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
# Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
        out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token : Optional[Any] ,num_runs : int=7 ):
lowerCAmelCase_ : Optional[int] = None
if token is not None:
lowerCAmelCase_ : Tuple = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowerCAmelCase_ : Dict = '''636036'''
lowerCAmelCase_ : Any = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowerCAmelCase_ : Dict = requests.get(lowercase__ ,headers=lowercase__ ).json()
return result["workflow_runs"]
def get_last_daily_ci_runs( token : List[Any] ):
lowerCAmelCase_ : Optional[int] = get_daily_ci_runs(lowercase__ )
lowerCAmelCase_ : Dict = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowerCAmelCase_ : str = workflow_run['''id''']
break
return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names : Union[str, Any] ,output_dir : int ,token : List[Any] ):
lowerCAmelCase_ : Optional[int] = get_last_daily_ci_runs(lowercase__ )
if workflow_run_id is not None:
lowerCAmelCase_ : Union[str, Any] = get_artifacts_links(worflow_run_id=lowercase__ ,token=lowercase__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowerCAmelCase_ : str = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase__ ,artifact_url=lowercase__ ,output_dir=lowercase__ ,token=lowercase__ )
def get_last_daily_ci_reports( artifact_names : Optional[Any] ,output_dir : Dict ,token : int ):
get_last_daily_ci_artifacts(lowercase__ ,lowercase__ ,lowercase__ )
lowerCAmelCase_ : Dict = {}
for artifact_name in artifact_names:
lowerCAmelCase_ : Any = os.path.join(lowercase__ ,f"""{artifact_name}.zip""" )
if os.path.isfile(lowercase__ ):
lowerCAmelCase_ : Any = {}
with zipfile.ZipFile(lowercase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase__ ):
# read the file
with z.open(lowercase__ ) as f:
lowerCAmelCase_ : Union[str, Any] = f.read().decode('''UTF-8''' )
return results
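# Hypothetical invocation (added; the environment variable and artifact name are
# placeholders, not from this file):
#
#   import os
#   results = get_last_daily_ci_reports(
#       ["run_all_tests_gpu_test_reports"], ".", token=os.environ["GH_TOKEN"])
#   # `results` maps each artifact name to {filename: decoded report text}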
| 171 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that the decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 56 | 0 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm: returns (x, y) with a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve n ≡ r1 (mod n1) and n ≡ r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same problem as above, solved via modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
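# Worked example, for illustration: n ≡ 1 (mod 5) and n ≡ 3 (mod 7) has the unique
# solution n = 31 modulo 5 * 7 = 35, so both chinese_remainder_theorem(5, 1, 7, 3)
# and chinese_remainder_theorem2(5, 1, 7, 3) return 31.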
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 181 |
def power(base: int, exponent: int) -> float:
    """Raise ``base`` to ``exponent`` using recursion.

    >>> power(3, 4)
    81
    """
    return base * power(base, exponent - 1) if exponent else 1
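# For illustration: power(2, 5) == 32. The recursion only terminates for
# exponent >= 0; negative exponents are handled by the caller below via
# abs() plus a reciprocal.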
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["ViTFeatureExtractor"]
UpperCAmelCase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 271 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
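# The secant update x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n))
# applied to f(x) = x^3 - 2x - 5 from the starting points 3 and 3.5 converges to
# the single real root near x ≈ 2.0946, which is what the demo below prints.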
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en"
__magic_name__ = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between two prompts in a file, with its start/end indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1

    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__magic_name__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCased identifier into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center ``text`` in a cell of the given ``width``; check marks count as width 2."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the big Markdown table summarizing tokenizer/backend support per model."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is up to date; rewrite it if ``overwrite``."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 657 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 56 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
'''simple docstring'''
lowercase = """UNwant\u00E9d,running"""
lowercase = """unwanted, running"""
return input_text, output_text
    def test_full_tokenizer(self):
'''simple docstring'''
lowercase = self.tokenizer_class(self.vocab_file )
lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers(self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = """UNwant\u00E9d,running"""
lowercase = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# With lower casing
lowercase = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
lowercase = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
lowercase = """UNwant\u00E9d,running"""
lowercase = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowercase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def test_chinese(self):
'''simple docstring'''
lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def test_basic_tokenizer_lower(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_no_lower(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
'''simple docstring'''
lowercase = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def test_wordpiece_tokenizer(self):
'''simple docstring'''
lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowercase = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
lowercase = i
lowercase = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    def test_is_whitespace(self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
'''simple docstring'''
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
    def test_sequence_builders(self):
'''simple docstring'''
lowercase = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowercase = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowercase = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""" ) else False
lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
'''simple docstring'''
lowercase = ["""的""", """人""", """有"""]
lowercase = """""".join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase = True
lowercase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase = False
lowercase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
lowercase = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 588 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression over integers.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
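# Division here truncates toward zero rather than flooring: for a = -4, b = 3
# the condition a * b < 0 and a % b != 0 holds, so the result is
# -4 // 3 + 1 == -1 instead of floor division's -2.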
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of all tensors in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index for ``dims``."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
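# For example, _flat_idx_to_idx(5, (2, 3)) == (1, 2): flat index 5 in a
# row-major 2x3 grid sits in row 1, column 2.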
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return an ordered list of slice tuples that exactly covers the (inclusive)
    multi-dimensional range from ``start`` to ``end`` in a tensor of batch dims ``dims``."""

    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    but without materializing a reshaped copy of the whole tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run ``layer`` over ``inputs`` in chunks of ``chunk_size`` along the flattened
    batch dimensions, reassembling the outputs afterwards."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
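# Sketch of intended use (illustrative only, names assumed): for a module taking a
# [batch, seq, c] tensor, chunk_layer(module, {"x": x}, chunk_size=4, no_batch_dims=1)
# runs the module on 4-row slices of the flattened batch and stitches the results
# back together, trading peak activation memory for extra iterations.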
class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First time we've seen these inputs, so we must tune
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 640 |
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (no memoization) over the largest all-ones square."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in ``dp_array``."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # rotate the rows; a fresh current_row avoids aliasing the two buffers
        next_row = current_row
        current_row = [0] * (cols + 1)

    return largest_square_area
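# All four variants return the side length of the largest all-ones square, e.g.
# largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]) == 2, which is
# exactly what the demo below prints.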
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for BiT (Big Transfer) backbones."""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
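# Illustrative check: BitConfig(global_padding="same").global_padding == "SAME",
# since padding strategies are normalized to upper case in __init__ above.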
| 406 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope='session' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope='session' )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session' )
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
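
# A minimal sketch (not part of the original conftest; helper name is invented):
# the compressed fixtures above all wrap the same FILE_CONTENT bytes, so a
# round-trip check only needs the matching open() function per extension.
def _check_compressed_roundtrip(path):
    import bz2
    import gzip
    import lzma

    openers = {'.bz2': bz2.open, '.gz': gzip.open, '.xz': lzma.open}
    opener = openers[os.path.splitext(str(path))[1]]
    with opener(path, 'rb') as f:
        assert f.read() == bytes(FILE_CONTENT, 'utf-8')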
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session')
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope='session')
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope='session')
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope='session')
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope='session')
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope='session')
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
    with open(csv_path, 'rb') as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, 'wb') as f:
        f.write(data)
    return path


@pytest.fixture(scope='session')
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope='session')
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace('.csv', '.CSV')))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace('.csv', '.CSV')))
    return path


@pytest.fixture(scope='session')
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(csv_path, arcname=os.path.join('main_dir', os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join('main_dir', os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope='session')
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        }
    )
    with open(path, 'wb') as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
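
# Hedged sketch (not in the original conftest; helper name is invented): reading
# the Parquet fixture back with pyarrow confirms the schema and row count above.
def _inspect_parquet(parquet_path):
    table = pq.read_table(parquet_path)
    assert table.num_rows == len(DATA)
    assert table.schema.names == ['col_1', 'col_2', 'col_3']
    return table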
@pytest.fixture(scope='session')
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path, 'w') as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path


@pytest.fixture(scope='session')
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def abc_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path


@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path


@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path


@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path


@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')


@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')


@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path


@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
    (data_dir / 'subdir').mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / '.subdir').mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    return data_dir
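
# Sketch (invented helper, not part of the original conftest): downstream tests
# usually want only the visible files from the fixture above; pathlib makes the
# hidden-file/hidden-directory filter explicit.
def _visible_text_files(data_dir):
    return sorted(
        p for p in data_dir.rglob('*.txt')
        if not any(part.startswith('.') for part in p.relative_to(data_dir).parts)
    )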
| 56 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
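
# Usage sketch (not part of the original module): composing a full OwlViTConfig
# from sub-config dicts; unspecified fields keep the defaults defined above.
if __name__ == "__main__":
    _config = OwlViTConfig.from_text_vision_configs(
        text_config={"vocab_size": 49408}, vision_config={"patch_size": 32}
    )
    print(_config.projection_dim)  # 512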
| 135 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
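
# Hedged usage sketch (not part of the original file): the ONNX config above only
# switches the dynamic axes on the task, so "multiple-choice" adds a choice axis.
if __name__ == "__main__":
    _config = CamembertConfig(vocab_size=30522)
    _onnx_config = CamembertOnnxConfig(_config, task='multiple-choice')
    print(_onnx_config.inputs)  # input_ids/attention_mask with a 'choice' axis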
| 56 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
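
# Usage sketch (assumption about the surrounding CLI wiring, not in this file):
# the parser above registers as the `config` subcommand, so both forms work:
#
#   accelerate config                                  # interactive prompts
#   accelerate config --config_file ./my_config.yaml   # explicit save location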
| 553 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}


class TimesformerConfig(PretrainedConfig):
    model_type = 'timesformer'

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type='divided_space_time',
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
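
# Minimal usage sketch (not part of the original module): instantiating the
# config with a longer clip; unset arguments fall back to the defaults above.
if __name__ == "__main__":
    _config = TimesformerConfig(num_frames=16)
    print(_config.num_frames, _config.hidden_size)  # 16 768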
| 56 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("""google/byt5-small""")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # skip ids that do not decode to valid UTF-8
                continue
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"""^[ a-zA-Z]+$""", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + """ """
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def _snake_case ( self :Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
SCREAMING_SNAKE_CASE__ = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def _snake_case ( self :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = """Unicode €."""
SCREAMING_SNAKE_CASE__ = tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , SCREAMING_SNAKE_CASE_ )
# decoding
SCREAMING_SNAKE_CASE__ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , """Unicode €.</s>""" )
SCREAMING_SNAKE_CASE__ = tokenizer("""e è é ê ë""" )
SCREAMING_SNAKE_CASE__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , SCREAMING_SNAKE_CASE_ )
# decoding
SCREAMING_SNAKE_CASE__ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def _snake_case ( self :int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
SCREAMING_SNAKE_CASE__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE__ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _snake_case ( self :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
SCREAMING_SNAKE_CASE__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , SCREAMING_SNAKE_CASE_ )
self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE_ )
self.assertNotIn("""decoder_input_ids""" , SCREAMING_SNAKE_CASE_ )
self.assertNotIn("""decoder_attention_mask""" , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self :int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = [
"""Summary of the text.""",
"""Another summary.""",
]
SCREAMING_SNAKE_CASE__ = tokenizer(
text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding="""max_length""" , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ = ["""A long paragraph for summarization. </s>"""]
SCREAMING_SNAKE_CASE__ = ["""Summary of the text. </s>"""]
# fmt: off
SCREAMING_SNAKE_CASE__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE__ = tokenizer(SCREAMING_SNAKE_CASE_ , text_target=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch["""input_ids"""][0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch["""labels"""][0] )
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = """ He is very happy, UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
SCREAMING_SNAKE_CASE__ = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE__ = json.load(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE__ = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = [f'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
SCREAMING_SNAKE_CASE__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE__ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=SCREAMING_SNAKE_CASE_ )]
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def _snake_case ( self :Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
pass
def _snake_case ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE_ , attr + """_id""" , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + """_id""" ) , SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , attr + """_id""" , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + """_id""" ) , SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens_ids""" ) , [] )
setattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
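
# Byte-level sketch (not part of the original tests): ByT5 has no subword vocab;
# ids are raw UTF-8 bytes shifted by the 3 special tokens (pad=0, eos=1, unk=2).
def _byte_ids(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")]
# e.g. _byte_ids("hi") == [107, 108], matching the expected ids hard-coded above.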
| 6 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the payloads; the links between nodes stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 56 | 0 |
def join(separator: str, separated: list) -> str:
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('join() accepts only strings to be joined')
        joined += word_or_phrase + separator
    return joined.strip(separator)
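

# Usage sketch (not part of the original file; helper name is invented):
# strip(separator) above trims only the trailing separator left by the loop.
def _join_examples() -> None:
    assert join('', ['a', 'b', 'c', 'd']) == 'abcd'
    assert join(' ', ['You', 'are', 'amazing!']) == 'You are amazing!'
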
if __name__ == "__main__":
from doctest import testmod
testmod()
| 312 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
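

# Worked example (sketch, not part of the original file): a 10 kg mass moving at
# 5 m/s carries 0.5 * 10 * 5**2 = 125 J; abs() makes the sign of velocity irrelevant.
def _kinetic_energy_example() -> float:
    return kinetic_energy(mass=10, velocity=-5)  # 125.0
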
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 171 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == 'mps', 'Gradient checkpointing skipped on MPS')
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id='CompVis/stable-diffusion-v1-4', fp16=False):
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder='vae', torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
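# The tests above all share one pattern: run the VAE, flatten a small corner of
# the output, and compare it against stored reference values. Below is a minimal
# sketch of that comparison; the helper name and default tolerance are
# illustrative assumptions, not part of diffusers.
import torch

def assert_slice_close(sample: torch.Tensor, expected: list, atol: float = 3e-3) -> None:
    # Take the same corner slice the tests use and compare elementwise.
    output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    expected_slice = torch.tensor(expected)
    max_diff = (output_slice - expected_slice).abs().max().item()
    assert torch.allclose(output_slice, expected_slice, atol=atol), f"max diff {max_diff:.2e} > atol {atol}"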
| 56 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for ConvBERT models: the standard BERT hyperparameters plus
    the convolution-head knobs (head_ratio, conv_kernel_size, num_groups)."""

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
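# Hedged illustration of the configuration above, assuming the released
# `transformers` package (which this file mirrors); the sizes below are
# arbitrary small values chosen for the sketch, not ConvBERT defaults.
from transformers import ConvBertConfig, ConvBertModel

demo_config = ConvBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, head_ratio=2, conv_kernel_size=9)
demo_model = ConvBertModel(demo_config)
print(demo_model.config.model_type)  # "convbert"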
| 181 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
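# One detail worth pulling out of the tests above: seeded generators must be
# created differently on "mps". A minimal sketch; the helper name is illustrative.
import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    # mps does not support device-local generators, so fall back to the global seed there
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)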
| 56 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
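# Quick usage check of the n-gram helper above (character n-grams, not word n-grams).
print(create_ngram("banana", 3))  # ['ban', 'ana', 'nan', 'ana']
print(create_ngram("I am an NLPer", 2)[:3])  # ['I ', ' a', 'am']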
| 271 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the product of each prime partition of number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest value writable as a sum of primes in more than
    number_unique_partitions ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
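# Unique prime factorisation means distinct prime multisets give distinct
# products, so len(partition(n)) counts prime partitions without storing them.
# 10 = 2+2+2+2+2 = 2+2+3+3 = 2+3+5 = 3+7 = 5+5, i.e. five prime partitions:
print(len(partition(10)))  # 5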
| 56 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
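# Hedged usage sketch of the combined tokenizer; the checkpoint name is an
# assumption based on the public hub, and downloading it requires network access.
from transformers import RagTokenizer

rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
batch = rag_tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
print(batch["input_ids"].shape)  # tokenized with the question encoder by default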
| 657 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
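# Sketch of driving the parser programmatically; config_command(args) would then
# launch the interactive prompts, so only the argument parsing is shown here.
demo_parser = config_command_parser()
demo_args = demo_parser.parse_args(["--config_file", "/tmp/accelerate_config.yaml"])
print(demo_args.config_file)  # where config_command(demo_args) would save the answers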
| 56 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # remap the original checkpoint keys onto the diffusers parameter names
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # this checkpoint stores a bare state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
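# Hedged round-trip check that a converted checkpoint reloads cleanly; the paths
# come from the script above and must already exist for this to run.
with open("hub/hopper-medium-v2/unet/hor32/config.json") as f:
    reloaded_config = json.load(f)
reloaded_model = UNet1DModel(**reloaded_config)
reloaded_model.load_state_dict(torch.load("hub/hopper-medium-v2/unet/hor32/diffusion_pytorch_model.bin"))
print(sum(p.numel() for p in reloaded_model.parameters()))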
| 588 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the input number is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the first odd composite that cannot be written as prime + 2*k**2."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
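# Goldbach's other conjecture: every odd composite equals a prime plus twice a
# square; solution() searches for the first counterexample. Quick checks:
print(is_prime(9 - 2 * 1**2))  # True: 9 = 7 + 2*1**2 satisfies the conjecture
print(solution())  # 5777, the smallest odd composite with no such decomposition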
| 56 | 0 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """Greedy fractional knapsack: take items in decreasing profit/weight order."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained per 1 kg of each item: calculate and append
    # profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and until i < length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as taken

        # check if the whole item still fits in the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight: 1 === weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # The item no longer fits whole, so take only the remaining
            # capacity's worth of it: (weight remaining) / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
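# Non-interactive checks of the greedy routine above.
print(calc_profit([1, 2, 3], [3, 4, 5], 15))  # 6: everything fits, so total profit
print(calc_profit([60, 100, 120], [10, 20, 30], 50))  # 240.0: last item taken fractionally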
| 640 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of partitions, returned as
    inclusive byte ranges such as '1-25'."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")

    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
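# Usage of the allocator as reconstructed above; the final partition absorbs
# any remainder bytes.
print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
print(allocation_num(10, 3))   # ['1-3', '4-6', '7-10']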
| 56 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase (__lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = BertJapaneseTokenizer
_snake_case : str = False
_snake_case : Any = True
def __UpperCAmelCase ( self ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : int = 'こんにちは、世界。 \nこんばんは、世界。'
UpperCAmelCase_ : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
return text, ids
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
UpperCAmelCase_ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as handle:
pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as handle:
UpperCAmelCase_ : List[str] = pickle.load(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Tuple = tokenizer_new.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def __UpperCAmelCase ( self ) -> Tuple:
try:
UpperCAmelCase_ : Optional[int] = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
try:
UpperCAmelCase_ : int = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = MecabTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def __UpperCAmelCase ( self ) -> int:
try:
UpperCAmelCase_ : int = MecabTokenizer(
do_lower_case=SCREAMING_SNAKE_CASE_ , normalize_text=SCREAMING_SNAKE_CASE_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = MecabTokenizer(normalize_text=SCREAMING_SNAKE_CASE_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Any = 'こんにちは、世界。\nこんばんは、世界。'
UpperCAmelCase_ : Tuple = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as handle:
pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as handle:
UpperCAmelCase_ : Dict = pickle.load(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_new.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_sudachi
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Any = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : str = SudachiTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Dict = SudachiTokenizer(normalize_text=SCREAMING_SNAKE_CASE_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : str = SudachiTokenizer(trim_whitespace=SCREAMING_SNAKE_CASE_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as handle:
pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as handle:
UpperCAmelCase_ : Tuple = pickle.load(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[int] = tokenizer_new.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = JumanppTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : str = JumanppTokenizer(normalize_text=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = JumanppTokenizer(trim_whitespace=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : str = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
UpperCAmelCase_ : List[Any] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ : Tuple = i
UpperCAmelCase_ : Optional[int] = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
UpperCAmelCase_ : Optional[int] = tokenizer.subword_tokenizer
UpperCAmelCase_ : Tuple = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
UpperCAmelCase_ : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
UpperCAmelCase_ : Tuple = tokenizer.encode('ありがとう。' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (__lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = BertJapaneseTokenizer
_snake_case : List[Any] = False
def __UpperCAmelCase ( self ) -> int:
super().setUp()
UpperCAmelCase_ : Tuple = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> List[str]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。'
UpperCAmelCase_ : Optional[int] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def __UpperCAmelCase ( self ) -> List[Any]:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> Any:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> List[str]:
pass # TODO add if relevant
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
UpperCAmelCase_ : str = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
UpperCAmelCase_ : int = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_ : Optional[int] = i
UpperCAmelCase_ : int = CharacterTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : int = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
UpperCAmelCase_ : Any = tokenizer.encode('ありがとう。' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[int] = tokenizer.encode('どういたしまして。' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = 'cl-tohoku/bert-base-japanese'
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
UpperCAmelCase_ : Any = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
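# Minimal sketch of the WordpieceTokenizer behaviour the tests above assert,
# reusing the import at the top of this file; the tiny vocab is illustrative.
demo_vocab = {token: i for i, token in enumerate(["[UNK]", "こん", "##にちは", "##ばんは"])}
demo_wordpiece = WordpieceTokenizer(vocab=demo_vocab, unk_token="[UNK]")
print(demo_wordpiece.tokenize("こんにちは"))  # ['こん', '##にちは']
print(demo_wordpiece.tokenize("こんばんは"))  # ['こん', '##ばんは']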
| 406 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Yields increasing integers, stopping after each element with probability
    # p_stop, to simulate an iterable dataset of unknown length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
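# Hedged sketch of what BatchSamplerShard does with two processes, using the
# positional arguments the assertions below rely on; output matches the first test.
_demo_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
_demo_shards = [BatchSamplerShard(_demo_sampler, 2, i) for i in range(2)]
print(list(_demo_shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(_demo_shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]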
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]:
__snake_case = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
__snake_case = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
random.seed(SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
__snake_case = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
__snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
__snake_case = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
__snake_case = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
def a ( self : Dict ) -> Tuple:
__snake_case = 42
__snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
__snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 56 | 0 |
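A minimal standalone sketch of the sharding behavior the row above tests, assuming accelerate's public BatchSamplerShard(batch_sampler, num_processes, process_index, even_batches=...) signature; the expected outputs in the comments follow the round-robin pattern asserted in the tests.

from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
# Two processes each take every other batch of the underlying sampler.
shards = [BatchSamplerShard(sampler, 2, i, even_batches=True) for i in range(2)]
print(list(shards[0]))  # expected: [[0, 1], [4, 5]]
print(list(shards[1]))  # expected: [[2, 3], [6, 7]]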
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowercase ):
"""simple docstring"""
A__ : List[Any] = ["onnx"]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""onnx"""] )
@classmethod
def a__ ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""onnx"""] )
@classmethod
def a__ ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""onnx"""] )
| 135 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
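A hedged usage sketch of the source/target tokenization paths the tests above cover; the checkpoint name is illustrative and downloading it requires network access.

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# Source text goes through the source SentencePiece model...
batch = tok(["I am a small frog"], return_tensors="pt")
# ...while text_target routes through the target-side vocabulary.
labels = tok(text_target=["Ich bin ein kleiner Frosch"])
print(batch.input_ids.shape, labels.input_ids)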
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = "T5Config"
def UpperCAmelCase ( a__ , a__ , a__ ):
'''simple docstring'''
lowerCAmelCase :List[Any] = jnp.zeros_like(lowercase__ )
lowerCAmelCase :str = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCAmelCase :Any = shifted_input_ids.at[:, 0].set(lowercase__ )
lowerCAmelCase :Union[str, Any] = jnp.where(shifted_input_ids == -1_00 , lowercase__ , lowercase__ )
return shifted_input_ids
class __UpperCamelCase ( __lowercase ):
lowercase_ : str = "mt5"
lowercase_ : str = MTaConfig
class __UpperCamelCase ( __lowercase ):
lowercase_ : List[str] = "mt5"
lowercase_ : int = MTaConfig
class __UpperCamelCase ( __lowercase ):
lowercase_ : Union[str, Any] = "mt5"
lowercase_ : Dict = MTaConfig
| 553 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
if len(lowercase__ ) != 3_2:
raise ValueError('Input must be of length 32' )
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _a (lowercase__ : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '08x' )[-8:]
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = B''
for char in message:
bit_string += format(lowercase__ , '08b' ).encode('utf-8' )
__snake_case = format(len(lowercase__ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(lowercase__ ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def _a (lowercase__ : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(lowercase__ ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(lowercase__ ) , 5_1_2 ):
__snake_case = bit_string[pos : pos + 5_1_2]
__snake_case = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def _a (lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '032b' )
__snake_case = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(lowercase__ , 2 )
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
return (a + b) % 2**3_2
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = preprocess(lowercase__ )
__snake_case = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
__snake_case = 0x6_7_4_5_2_3_0_1
__snake_case = 0xE_F_C_D_A_B_8_9
__snake_case = 0x9_8_B_A_D_C_F_E
__snake_case = 0x1_0_3_2_5_4_7_6
__snake_case = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowercase__ ):
__snake_case = aa
__snake_case = ba
__snake_case = ca
__snake_case = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case = d ^ (b & (c ^ d))
__snake_case = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case = c ^ (d & (b ^ c))
__snake_case = (5 * i + 1) % 1_6
elif i <= 4_7:
__snake_case = b ^ c ^ d
__snake_case = (3 * i + 5) % 1_6
else:
__snake_case = c ^ (b | not_aa(lowercase__ ))
__snake_case = (7 * i) % 1_6
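            # Register rotation per MD5 round: a <- d, d <- c, c <- b, then b <- b + rotl32(f + a + K[i] + M[g], s[i]).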
__snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__snake_case = d
__snake_case = c
__snake_case = b
__snake_case = sum_aa(lowercase__ , left_rotate_aa(lowercase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
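A standalone cross-check for the pure-Python MD5 row above; because the identifiers in this dataset row are mangled, the sketch validates a well-known test vector with hashlib rather than calling the row's functions directly.

import hashlib

msg = b"The quick brown fox jumps over the lazy dog"
# The string-based digest above should produce the same 32-char hex output for msg.
assert hashlib.md5(msg).hexdigest() == "9e107d9d372bb6826bd81d3542a419d6"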
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
def __init__( self :Any , __A :int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = num_of_nodes
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = {}
def _snake_case ( self :List[Any] , __A :int , __A :int , __A :int ) -> None:
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self :str , __A :int ) -> int:
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self :Union[str, Any] , __A :int ) -> None:
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ = self.find_component(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self :Tuple , __A :list[int] , __A :int , __A :int ) -> None:
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ = v_node
component_size[v_node] += component_size[u_node]
self.set_component(SCREAMING_SNAKE_CASE_ )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ = self.find_component(SCREAMING_SNAKE_CASE_ )
component_size[u_node] += component_size[v_node]
self.set_component(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self :Any ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = edge
SCREAMING_SNAKE_CASE__ = self.m_component[u]
SCREAMING_SNAKE_CASE__ = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = edge
SCREAMING_SNAKE_CASE__ = self.m_component[u]
SCREAMING_SNAKE_CASE__ = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def SCREAMING_SNAKE_CASE__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__snake_case = quote(lowercase__ )
return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' , revision=lowercase__ )
| 56 | 0 |
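A hedged sketch of the call the one-function row above wraps, assuming huggingface_hub's public hf_hub_url(repo_id, filename, repo_type=..., revision=...) signature; repo and file names are illustrative.

import huggingface_hub as hfh

url = hfh.hf_hub_url(
    "user/some_dataset", "data/train-00000.parquet", repo_type="dataset", revision="main"
)
print(url)  # roughly: https://huggingface.co/datasets/user/some_dataset/resolve/main/data/train-00000.parquet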
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
if isinstance(lowercase__ , torch.Tensor ):
return image
elif isinstance(lowercase__ , PIL.Image.Image ):
lowerCAmelCase = [image]
lowerCAmelCase = [trans(img.convert('''RGB''' ) ) for img in image]
lowerCAmelCase = torch.stack(lowercase__ )
return image
class lowercase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if strength < 0 or strength > 1:
raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}" )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->str:
# get the original timestep using init_timestep
lowerCAmelCase = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = max(num_inference_steps - init_timestep , 0 )
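        # e.g. strength=0.8 with 50 steps -> t_start=10, so only the final 40 denoising steps run on the noised image.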
lowerCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->List[str]:
if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}" )
lowerCAmelCase = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCAmelCase = init_latents.shape
lowerCAmelCase = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
print('''add noise to latents at timestep''' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.8 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 50 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) ->Union[ImagePipelineOutput, Tuple]:
self.check_inputs(SCREAMING_SNAKE_CASE_ )
# 2. Preprocess image
lowerCAmelCase = preprocess(SCREAMING_SNAKE_CASE_ )
# 3. set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device )
lowerCAmelCase , lowerCAmelCase = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
lowerCAmelCase = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# 4. Prepare latent variables
lowerCAmelCase = self.prepare_latents(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.unet.dtype , self.device , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ):
# 1. predict noise model_output
lowerCAmelCase = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , use_clipped_model_output=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , ).prev_sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 312 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside the setUp function.
    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module.
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
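A minimal sketch of the 4-bit loading pattern the tests above exercise, assuming the transformers BitsAndBytesConfig API and an available CUDA device; the checkpoint name is illustrative.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

cfg = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=cfg, device_map="auto"
)
tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
inputs = tok("Hello my name is", return_tensors="pt").to(model.device)
print(tok.decode(model.generate(**inputs, max_new_tokens=10)[0], skip_special_tokens=True))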
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
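# The coefficient formulas below follow the standard RBJ Audio EQ Cookbook biquad designs.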
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float = 1 / sqrt(2 ) ):
lowerCAmelCase_ : Any = tau * frequency / samplerate
lowerCAmelCase_ : Union[str, Any] = sin(lowercase__ )
lowerCAmelCase_ : str = cos(lowercase__ )
lowerCAmelCase_ : str = _sin / (2 * q_factor)
lowerCAmelCase_ : List[str] = (1 - _cos) / 2
lowerCAmelCase_ : Tuple = 1 - _cos
lowerCAmelCase_ : Optional[int] = 1 + alpha
lowerCAmelCase_ : List[str] = -2 * _cos
lowerCAmelCase_ : List[str] = 1 - alpha
lowerCAmelCase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float = 1 / sqrt(2 ) ):
lowerCAmelCase_ : Dict = tau * frequency / samplerate
lowerCAmelCase_ : Union[str, Any] = sin(lowercase__ )
lowerCAmelCase_ : int = cos(lowercase__ )
lowerCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
lowerCAmelCase_ : Optional[int] = (1 + _cos) / 2
lowerCAmelCase_ : Dict = -1 - _cos
lowerCAmelCase_ : Optional[int] = 1 + alpha
lowerCAmelCase_ : Dict = -2 * _cos
lowerCAmelCase_ : str = 1 - alpha
lowerCAmelCase_ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float = 1 / sqrt(2 ) ):
lowerCAmelCase_ : Union[str, Any] = tau * frequency / samplerate
lowerCAmelCase_ : Any = sin(lowercase__ )
lowerCAmelCase_ : Tuple = cos(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = _sin / (2 * q_factor)
lowerCAmelCase_ : Optional[Any] = _sin / 2
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Dict = -ba
lowerCAmelCase_ : Union[str, Any] = 1 + alpha
lowerCAmelCase_ : Union[str, Any] = -2 * _cos
lowerCAmelCase_ : Tuple = 1 - alpha
lowerCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float = 1 / sqrt(2 ) ):
lowerCAmelCase_ : int = tau * frequency / samplerate
lowerCAmelCase_ : int = sin(lowercase__ )
lowerCAmelCase_ : List[Any] = cos(lowercase__ )
lowerCAmelCase_ : List[Any] = _sin / (2 * q_factor)
lowerCAmelCase_ : int = 1 - alpha
lowerCAmelCase_ : int = -2 * _cos
lowerCAmelCase_ : Union[str, Any] = 1 + alpha
lowerCAmelCase_ : int = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float ,__UpperCamelCase : float = 1 / sqrt(2 ) ,):
lowerCAmelCase_ : Optional[int] = tau * frequency / samplerate
lowerCAmelCase_ : Tuple = sin(lowercase__ )
lowerCAmelCase_ : Tuple = cos(lowercase__ )
lowerCAmelCase_ : Dict = _sin / (2 * q_factor)
lowerCAmelCase_ : Optional[int] = 10 ** (gain_db / 40)
lowerCAmelCase_ : Any = 1 + alpha * big_a
lowerCAmelCase_ : Any = -2 * _cos
lowerCAmelCase_ : List[Any] = 1 - alpha * big_a
lowerCAmelCase_ : str = 1 + alpha / big_a
lowerCAmelCase_ : List[Any] = -2 * _cos
lowerCAmelCase_ : str = 1 - alpha / big_a
lowerCAmelCase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float ,__UpperCamelCase : float = 1 / sqrt(2 ) ,):
lowerCAmelCase_ : Any = tau * frequency / samplerate
lowerCAmelCase_ : Dict = sin(lowercase__ )
lowerCAmelCase_ : List[str] = cos(lowercase__ )
lowerCAmelCase_ : str = _sin / (2 * q_factor)
lowerCAmelCase_ : List[str] = 10 ** (gain_db / 40)
lowerCAmelCase_ : List[Any] = (big_a + 1) - (big_a - 1) * _cos
lowerCAmelCase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowerCAmelCase_ : Dict = (big_a - 1) - (big_a + 1) * _cos
lowerCAmelCase_ : int = (big_a - 1) + (big_a + 1) * _cos
lowerCAmelCase_ : Optional[Any] = 2 * sqrt(lowercase__ ) * alpha
lowerCAmelCase_ : Optional[Any] = big_a * (pmc + aaa)
lowerCAmelCase_ : Any = 2 * big_a * mpc
lowerCAmelCase_ : Optional[Any] = big_a * (pmc - aaa)
lowerCAmelCase_ : Optional[Any] = ppmc + aaa
lowerCAmelCase_ : List[Any] = -2 * pmpc
lowerCAmelCase_ : Any = ppmc - aaa
lowerCAmelCase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : float ,__UpperCamelCase : float = 1 / sqrt(2 ) ,):
lowerCAmelCase_ : Dict = tau * frequency / samplerate
lowerCAmelCase_ : List[str] = sin(lowercase__ )
lowerCAmelCase_ : Optional[Any] = cos(lowercase__ )
lowerCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
lowerCAmelCase_ : List[Any] = 10 ** (gain_db / 40)
lowerCAmelCase_ : Dict = (big_a + 1) - (big_a - 1) * _cos
lowerCAmelCase_ : List[Any] = (big_a + 1) + (big_a - 1) * _cos
lowerCAmelCase_ : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
lowerCAmelCase_ : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
lowerCAmelCase_ : int = 2 * sqrt(lowercase__ ) * alpha
lowerCAmelCase_ : str = big_a * (ppmc + aaa)
lowerCAmelCase_ : List[Any] = -2 * big_a * pmpc
lowerCAmelCase_ : Tuple = big_a * (ppmc - aaa)
lowerCAmelCase_ : List[str] = pmc + aaa
lowerCAmelCase_ : Any = 2 * mpc
lowerCAmelCase_ : Optional[Any] = pmc - aaa
lowerCAmelCase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
| 171 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
    def a ( self : Any ) -> Dict:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -2_0.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        with get_context('fork' ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores_decoder = [d[0][2] for d in decoded_decoder_out]
        lm_scores_decoder = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder , decoded_processor )
        self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , texts_decoder )
        self.assertTrue(np.array_equal(logit_scores_decoder , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , logit_scores_decoder , atol=1e-3 ) )
        self.assertTrue(np.array_equal(lm_scores_decoder , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lm_scores_decoder , atol=1e-3 ) )

    def a ( self : Optional[Any] ) -> Tuple:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -2_0.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits , alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        decoder.reset_params(
            alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        with get_context('fork' ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder , decoded_processor )
        self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , texts_decoder )
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
        self.assertEqual(lm_model.score_boundary , lm_score_boundary )

    def a ( self : Optional[Any] ) -> List[str]:
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir )
        expected_decoder_files = ['alphabet.json', 'language_model']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files , expected_decoder_files )

    def a ( self : Dict ) -> Dict:
        local_dir = snapshot_download('hf-internal-testing/processor_with_lm' )
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir )
        expected_decoder_files = os.listdir(path_to_cached_dir )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files , expected_decoder_files )

    def a ( self : Any ) -> List[Any]:
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        processor_auto = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
        raw_speech = floats_list((3, 1000) )
        input_wavaveca = processor_wavaveca(raw_speech , return_tensors='np' )
        input_auto = processor_auto(raw_speech , return_tensors='np' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits )
        decoded_auto = processor_auto.batch_decode(logits )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    def a ( self : Dict ) -> Optional[int]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )

    @staticmethod
    def get_from_offsets ( offsets : Optional[int] , key : Optional[int] ) -> int:
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def a ( self : Optional[int] ) -> str:
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits , output_word_offsets=True )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
    def a ( self : Optional[Any] ) -> Optional[int]:
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits , output_word_offsets=True )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
        self.assertListEqual(
            [' '.join(self.get_from_offsets(o , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def a ( self : Optional[Any] ) -> Optional[Any]:
        import torch

        ds = load_dataset('common_voice' , 'en' , split='train' , streaming=True )
        ds = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        model = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]
        expected_text = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps , 'word' ) ) , expected_text )
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps , 'word' ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps , 'start_time' ) )
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps , 'end_time' ) )
        # fmt: off
        expected_start_times = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        expected_end_times = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_times , atol=0.0_1 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_times , atol=0.0_1 ) )
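# Usage sketch (added for illustration; mirrors the tests above rather than
# extending them). `Wav2Vec2ProcessorWithLM` is the real class behind the
# aliased name used in this file; the random logits are a stand-in.
if __name__ == "__main__":
    import numpy as np
    from multiprocessing import get_context
    from transformers import Wav2Vec2ProcessorWithLM

    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, 16)  # (batch, time, vocab)
    # the pool must be created *after* the processor so the LM is visible to workers
    with get_context("fork").Pool() as pool:
        decoded = processor.batch_decode(logits, pool)
    print(decoded.text)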
| 56 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_60_47
RO_CODE = 25_61_45
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def UpperCamelCase__ ( self :List[str]):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def UpperCamelCase__ ( self :int):
        """simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
    def UpperCamelCase__ ( self :List[str]):
        """simple docstring"""
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname_a = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_a)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_a)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_a)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_a)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname_a)
                # Save tokenizer rust, legacy_format=True
                tmpdirname_a = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_a, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_a)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_a)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_a)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname_a)
                # Save tokenizer rust, legacy_format=False
                tmpdirname_a = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_a, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_a)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_a)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_a)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname_a)
    @require_torch
    def UpperCamelCase__ ( self :Dict):
        """simple docstring"""
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                # Longer text that will definitely require truncation.
                src_text = [
                    ' UN Chief Says There Is No Military Solution in Syria',
                    ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
                    ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
                    ' will only worsen the violence and misery for millions of people.',
                ]
                tgt_text = [
                    'Şeful ONU declară că nu există o soluţie militară în Siria',
                    'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors='pt', src_lang='eng_Latn', tgt_lang='ron_Latn', )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors='pt')
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors='pt')
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn('decoder_input_ids', batch_encoder_only)
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.')
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
pass
    def UpperCamelCase__ ( self :str):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                added_tokens = [AddedToken('<special>', lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                r_output = tokenizer_r.encode('Hey this is a <special> token')
                special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                    p_output = tokenizer_p.encode('Hey this is a <special> token')
                    cr_output = tokenizer_cr.encode('Hey this is a <special> token')
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
    @classmethod
    def UpperCamelCase__ ( cls :Union[str, Any]):
        """simple docstring"""
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='eng_Latn', tgt_lang='ron_Latn')
        cls.pad_token_id = 1
        return cls
def UpperCamelCase__ ( self :int):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'], 25_6001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'], 25_6002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'], 25_6057)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def UpperCamelCase__ ( self :Tuple):
        """simple docstring"""
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def UpperCamelCase__ ( self :List[Any]):
        """simple docstring"""
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [25_6203, 3])
    def UpperCamelCase__ ( self :str):
        """simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def UpperCamelCase__ ( self :str):
        """simple docstring"""
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt', )
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id['ron_Latn'])
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def UpperCamelCase__ ( self :int):
        """simple docstring"""
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='eng_Latn', tgt_lang='fra_Latn')
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
}, )
    @require_torch
    def UpperCamelCase__ ( self :List[str]):
        """simple docstring"""
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            'UN Chief says there is no military solution in Syria', src_lang='eng_Latn', tgt_lang='fra_Latn')
        self.assertEqual(
            inputs.input_ids, [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            'UN Chief says there is no military solution in Syria', src_lang='eng_Latn', tgt_lang='fra_Latn')
        self.assertEqual(
            inputs.input_ids, [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2])
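# Usage sketch (added for illustration; downloads the real checkpoint, so it is
# guarded and not meant to run as part of the tests above): building inputs
# with the NLLB tokenizer in its default, non-legacy mode.
if __name__ == "__main__":
    from transformers import NllbTokenizer

    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    ids = tok("UN Chief says there is no military solution in Syria").input_ids
    print(ids)  # starts with the eng_Latn code 256047 and ends with </s> (id 2)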
| 181 |
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    """simple docstring"""
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
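# For comparison (added sketch): the same computation without recursion, which
# sidesteps Python's recursion limit for large exponents.
def power_iterative(base: int, exponent: int) -> float:
    result = 1
    for _ in range(exponent):
        result *= base
    return result
if __name__ == "__main__":
    assert power_iterative(2, 10) == 1024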
| 56 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCAmelCase_ = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers ( Enum ):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput ( BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin :
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained ( cls : List[Any] , pretrained_model_name_or_path : Union[str, os.PathLike] = None , subfolder : Optional[str] = None , return_unused_kwargs : bool = False , **kwargs : Dict , ):
        """simple docstring"""
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained ( self : str , save_directory : Union[str, os.PathLike] , push_to_hub : bool = False , **kwargs : Union[str, Any] ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles ( self : Optional[Any] ):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles ( cls : Any ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left ( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar ( num_diffusion_timesteps: int , max_beta: float=0.999 , dtype: Any=jnp.float32 ) -> jnp.ndarray:
    '''simple docstring'''
    def alpha_bar(time_step: float ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState :
    """simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create ( cls : List[str] , scheduler : Union[str, Any] ):
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod ( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common ( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common ( state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
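# Quick self-contained check (added for illustration) of the two helpers
# defined above: the cosine schedule produces `num_diffusion_timesteps`
# betas in (0, max_beta], and the broadcast helper left-aligns a
# per-timestep vector against a sample batch.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,) and bool((_betas > 0).all()) and bool((_betas <= 0.999).all())
    _per_step = jnp.arange(4.0)  # one scalar per batch element
    _samples = jnp.ones((4, 3, 8, 8))  # (batch, channels, height, width)
    assert (broadcast_to_shape_from_left(_per_step, _samples.shape) * _samples).shape == _samples.shape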
| 271 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """simple docstring"""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 1_0**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """simple docstring"""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
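# One more sanity check (added for illustration): the secant iteration above
# should also recover sqrt(2) as the positive root of x**2 - 2.
if __name__ == "__main__":
    print(intersection(lambda x: x * x - 2, 1.0, 2.0))  # ~1.41421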
| 56 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    """simple docstring"""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr, low, mid, high):
    """simple docstring"""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size):
    """simple docstring"""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def benchmark_and_plot():
    """simple docstring"""
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
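# For contrast (added sketch, not part of the original file): Kadane's
# algorithm finds the maximum subarray *sum* in O(n) instead of the
# O(n log n) divide-and-conquer above.
def kadane(arr):
    best = current = arr[0]
    for value in arr[1:]:
        current = max(value, current + value)
        best = max(best, current)
    return best
if __name__ == "__main__":
    assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6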
| 657 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
    def a ( self : List[Any] ) -> Dict:
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 56 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
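# Added illustration (not part of the Informer module): a toy version of the
# lazy loading that _LazyModule implements — a name's owning module is only
# imported on first attribute access. `LazyAttrs` is invented for this sketch;
# the real implementation lives in transformers.utils.
import importlib
class LazyAttrs:
    """Resolve attributes lazily from an {module_name: [exported names]} map."""
    def __init__(self, import_structure):
        self._import_structure = import_structure
        self._cache = {}
    def __getattr__(self, name):
        if name in self._cache:
            return self._cache[name]
        for module_name, names in self._import_structure.items():
            if name in names:
                value = getattr(importlib.import_module(module_name), name)
                self._cache[name] = value
                return value
        raise AttributeError(name)
if __name__ == "__main__":
    lazy = LazyAttrs({"math": ["sqrt"], "json": ["dumps"]})
    print(lazy.sqrt(9.0))  # `math` is only imported at this point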
| 588 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
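# Quick usage check (added for illustration): "(3 + 4) * 5" in reverse Polish
# notation evaluates to 35.
if __name__ == "__main__":
    assert evaluate_postfix(["3", "4", "+", "5", "*"]) == 35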
| 56 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : str = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig ( PretrainedConfig ):
    model_type = "git_vision_model"
    def __init__( self : Dict , hidden_size : int=768 , intermediate_size : int=3072 , num_hidden_layers : int=12 , num_attention_heads : int=12 , num_channels : int=3 , image_size : int=224 , patch_size : int=16 , hidden_act : str="quick_gelu" , layer_norm_eps : float=1e-5 , attention_dropout : float=0.0 , initializer_range : float=0.02 , **kwargs : Optional[int] , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained ( cls : Tuple , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : List[Any] ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig ( PretrainedConfig ):
    model_type = "git"
    def __init__( self : int , vision_config : Optional[Any]=None , vocab_size : int=3_0522 , hidden_size : int=768 , num_hidden_layers : int=6 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=1024 , initializer_range : float=0.02 , layer_norm_eps : float=1e-12 , pad_token_id : int=0 , position_embedding_type : str="absolute" , use_cache : bool=True , tie_word_embeddings : bool=False , bos_token_id : int=101 , eos_token_id : int=102 , num_image_with_embedding : Optional[int]=None , **kwargs : Optional[int] , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict ( self : Optional[Any] ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
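# A small usage sketch (added for illustration): constructing the default GIT
# configuration and reading the nested vision config wired up above.
if __name__ == "__main__":
    config = GitConfig()
    print(config.vision_config.image_size)  # 224
    print(config.to_dict()["model_type"])  # "git"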
| 640 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy (not alias) the finished row so the next iteration reads row+1 values
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
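# Cross-check (added for illustration): all four variants above should agree
# on the side length of the largest all-ones square.
if __name__ == "__main__":
    _test_mat = [[1, 0, 1], [1, 1, 1], [1, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, _test_mat)
        == largest_square_area_in_matrix_top_down_with_dp(3, 3, _test_mat)
        == largest_square_area_in_matrix_bottom_up(3, 3, _test_mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _test_mat)
        == 2
    )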
| 56 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits ( x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor , 'b c d h w -> b (c d) h w' )
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor
def bits_to_decimal ( x , bits=BITS ):
    '''simple docstring'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step ( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else 'cpu'
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step ( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type="epsilon" , generator=None , return_dict : bool = True , ):
    '''simple docstring'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F"Unsupported prediction_type {prediction_type}." )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion (DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet , scheduler , bit_scale = 1.0 , ) -> Any:
        super().__init__()
        self.bit_scale = bit_scale
        # the custom step functions read `bit_scale` from the scheduler they are bound to
        scheduler.bit_scale = bit_scale
        custom_step = ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        # bind the custom step as a method so that `self` inside it is the scheduler
        scheduler.step = custom_step.__get__(scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height = 2_5_6 , width = 2_5_6 , num_inference_steps = 5_0 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
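# Round-trip sanity check for the bit helpers (added for illustration):
# quantising an image to 8 bits and back should reproduce it to within 1/255.
if __name__ == "__main__":
    _img = torch.rand(1, 3, 8, 8)
    assert torch.allclose(bits_to_decimal(decimal_to_bits(_img)), _img, atol=1 / 255)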
| 406 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset ():
    """simple docstring"""
    n = 1_0
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string' ) ),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string' ),
                    'answer_start': datasets.Value('int32' ),
                } ),
            'id': datasets.Value('int64' ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
            'id': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='session' )
def arrow_file (dataset , tmp_path_factory ):
    """simple docstring"""
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n    Text data.\n    Second line of data."
@pytest.fixture(scope='session' )
def text_file (tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename , 'w' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='session' )
def bz2_file (tmp_path_factory ):
    """simple docstring"""
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def gz_file (tmp_path_factory ):
    """simple docstring"""
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    data = bytes(FILE_CONTENT , 'utf-8' )
    with gzip.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def lz4_file (tmp_path_factory ):
    """simple docstring"""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        data = bytes(FILE_CONTENT , 'utf-8' )
        with lz4.frame.open(path , 'wb' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='session' )
def seven_zip_file (tmp_path_factory , text_file ):
    """simple docstring"""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(path , 'w' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope='session' )
def tar_file (tmp_path_factory , text_file ):
    """simple docstring"""
    import tarfile
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='session' )
def xz_file (tmp_path_factory ):
    """simple docstring"""
    import lzma
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with lzma.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 0 |
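# A minimal, self-contained sketch of the session-scoped pytest fixture pattern the
# conftest above is built on; the fixture and test names here are illustrative, not
# taken from the row itself.
import csv

import pytest


@pytest.fixture(scope="session")
def sample_csv(tmp_path_factory):
    # Created once per test session; every test that requests it reuses the same file.
    path = tmp_path_factory.mktemp("data") / "sample.csv"
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2"])
        writer.writeheader()
        writer.writerow({"col_1": "0", "col_2": 0})
    return str(path)


def test_sample_csv_header(sample_csv):
    with open(sample_csv) as f:
        assert f.readline().strip() == "col_1,col_2"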
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")

        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doc(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doc(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doc(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 135 |
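# A short sketch of the doctest-collection pattern used above: gather the doctests of a
# module into a unittest suite and run them. The toy function is illustrative.
import doctest
import unittest


def add(a: int, b: int) -> int:
    """
    >>> add(2, 3)
    5
    """
    return a + b


suite = doctest.DocTestSuite()  # with no argument, collects doctests from __main__
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0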
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _lowercase ( PretrainedConfig ):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowercase ( OnnxConfig ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 56 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class __UpperCamelCase :
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state, string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 553 |
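# Usage sketch for the Aho-Corasick automaton above: build it once from the keyword
# list, then find every occurrence of every keyword in one left-to-right pass over the
# text. The expected output follows the upstream doctest for this implementation.
automaton = __UpperCamelCase(["what", "hat", "ver", "er"])
print(automaton.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}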
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _lowercase ( PretrainedConfig ):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 56 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir: str = None):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("""run pip install datasets""" )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset, pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )

    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )

        # to save to val.source, val.target like summary datasets
        fn = """val""" if split == """validation""" else split

        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )

        src_fp = src_path.open("""w+""" )
        tgt_fp = tgt_path.open("""w+""" )

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["""translation"""]
            src_fp.write(ex[src_lang] + """\n""" )
            tgt_fp.write(ex[tgt_lang] + """\n""" )

    print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 6 |
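# Usage sketch for the downloader above. fire.Fire exposes the function as a CLI, so it
# can be driven from the shell or called directly; the script name and save_dir value
# here are illustrative.
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
# download_wmt_dataset("ro", "en", dataset="wmt16", save_dir="wmt16-ro-en")
# -> writes {train,val,test}.source and {train,val,test}.target under wmt16-ro-en/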
'''simple docstring'''
from typing import Any
class Node :
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList :
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=' ' )
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Dict = {}
class _lowerCAmelCase( PretrainedConfig ):
    """simple docstring"""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 57 |
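# A usage sketch for the rope_scaling validation above, assuming the config class
# (here still named _lowerCAmelCase) and its transformers dependency are importable.
cfg = _lowerCAmelCase(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
print(cfg.num_key_value_heads)  # 32: falls back to num_attention_heads when unset

try:
    _lowerCAmelCase(rope_scaling={"type": "xpos", "factor": 2.0})  # rejected type
except ValueError as err:
    print(err)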
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """simple docstring"""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 57 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class _lowerCAmelCase( DiffusionPipeline ):
    """simple docstring"""

    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )

        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample

            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )

            logger.info('Generated segment' , i )

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output )
| 57 |
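# The scale_features/scale_to_features pair above is an affine map onto the network's
# output range and its exact inverse; a standalone sketch using the same constants.
import math

import torch

min_value, max_value = math.log(1e-5), 4.0  # feature range
min_out, max_out = -1.0, 1.0  # network range

features = torch.linspace(min_value, max_value, steps=5)

# forward: features -> [min_out, max_out]
zero_one = (features - min_value) / (max_value - min_value)
scaled = zero_one * (max_out - min_out) + min_out

# inverse: network outputs -> feature range
restored = (scaled - min_out) / (max_out - min_out) * (max_value - min_value) + min_value

assert torch.allclose(features, restored, atol=1e-5)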
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )

    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )

    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.' )

    return mat_c - mat_b.T @ a_inv @ mat_b


class _lowerCAmelCase( unittest.TestCase ):
    """simple docstring"""

    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )

        input_matrix = np.block([[a, b], [b.T, c]] )

        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )

        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 57 | 1 |
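# A worked numeric check of the identity the first test above relies on:
# det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B).
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])

s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A, here 40/11
block = np.block([[a, b], [b.T, c]])  # det(block) = 40 = det(a) * det(s) = 11 * 40/11

assert np.isclose(np.linalg.det(block), np.linalg.det(a) * np.linalg.det(s))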
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class _lowerCAmelCase( ModelMixin ):
    """simple docstring"""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )

            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )

            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''

        logger.info(f'''{len(controlnets )} controlnets loaded from {pretrained_model_path}.''' )

        if len(controlnets ) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.''' )

        return cls(controlnets )
| 57 |
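# The forward pass above accumulates per-controlnet residuals elementwise; a toy
# sketch of that merge using plain tensors in place of controlnet outputs.
import torch

per_net_outputs = [
    ([torch.ones(2, 2), torch.ones(2, 2)], torch.ones(2, 2)),  # (down, mid) from net 0
    ([torch.full((2, 2), 2.0)] * 2, torch.full((2, 2), 2.0)),  # (down, mid) from net 1
]

for i, (down_samples, mid_sample) in enumerate(per_net_outputs):
    if i == 0:
        down_res, mid_res = down_samples, mid_sample
    else:
        down_res = [prev + curr for prev, curr in zip(down_res, down_samples)]
        mid_res = mid_res + mid_sample

assert torch.equal(mid_res, torch.full((2, 2), 3.0))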
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
UpperCamelCase_: Tuple = json.load(UpperCAmelCase__ )
UpperCamelCase_: List[str] = LukeConfig(use_entity_aware_attention=UpperCAmelCase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
UpperCamelCase_: Optional[int] = torch.load(UpperCAmelCase__ , map_location='cpu' )['module']
# Load the entity vocab file
UpperCamelCase_: Any = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'r' ) as f:
        tokenizer_config = json.load(f )
        tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'w' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'] )[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'] )[0]
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('entity_predictions.decoder.weight' )
    state_dict.pop('lm_head.decoder.weight' )
    state_dict.pop('lm_head.decoder.bias' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            state_dict_for_hugging_face[F'''luke.{key}'''] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 3_3, 7_6_8) )
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 7_6_8) )
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = 'Tokyo is the capital of <mask>.'
    span = (2_4, 3_0)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def snake_case (UpperCAmelCase__ ) -> dict:
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(UpperCAmelCase__ )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'''{language}:{entity_name}'''
            new_mapping[new_entity_name] = entity_id
    return new_mapping
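# Format note (illustrative, not from the original script): each line of the original
# Wikipedia2Vec entity vocab file is a JSON object roughly like
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which the loader above turns into {"en:Japan": 3, "ja:日本": 3}; special tokens such as
# "[MASK]" keep their bare name.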
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    """Counts the ways to assign N tasks to M persons with a bitmask-indexed DP table."""
    def __init__( self , task_performed , total ):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we do not include this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways( self , task_performed ):
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
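    # Cross-check sketch (not part of the original): brute force over every way to give
    # each person one task, keeping choices that are pairwise distinct and respect each
    # person's preference list. For the inputs above both approaches count 10 assignments.
    from itertools import product

    def brute_force_count(prefs , n_tasks ):
        count = 0
        for choice in product(range(1 , n_tasks + 1 ) , repeat=len(prefs ) ):
            if len(set(choice ) ) == len(choice ) and all(t in p for t, p in zip(choice , prefs ) ):
                count += 1
        return count

    assert brute_force_count(task_performed , total_tasks ) == AssignmentUsingBitmask(
        task_performed , total_tasks
    ).count_no_of_ways(task_performed )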
| 57 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig( PretrainedConfig ):
    """Configuration class to store the configuration of a DistilBERT model."""
    model_type = 'distilbert'
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }
    def __init__( self , vocab_size=3_0_5_2_2 , max_position_embeddings=5_1_2 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=1_2 , dim=7_6_8 , hidden_dim=4 * 7_6_8 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.0_2 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    """ONNX export configuration for DistilBERT."""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
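if __name__ == "__main__":
    # Sanity sketch (not part of the original file): thanks to attribute_map on
    # PretrainedConfig, the canonical attribute names resolve to DistilBERT's own
    # parameter names when read back.
    cfg = DistilBertConfig(n_layers=3 , dim=2_5_6 )
    assert cfg.hidden_size == 2_5_6 and cfg.num_hidden_layers == 3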
| 57 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys (config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
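# Example of one produced pair (illustrative, not in the original file): for i == 0 the
# first entry appended above is
#   ('blocks.0.norm1.weight', 'deit.encoder.layer.0.layernorm_before.weight')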
def read_in_q_k_v (state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
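# Shape note (illustrative, not in the original file): timm stores q, k and v stacked in a
# single (3 * hidden_size, hidden_size) matrix; the slices above peel off the row blocks
# [0:h], [h:2h] and [2h:3h] for query, key and value respectively.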
def rename_key (dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint (deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_0_0_0
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
# size of the architecture
    if deit_name[9:].startswith('tiny' ):
        config.hidden_size = 1_9_2
        config.intermediate_size = 7_6_8
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small' ):
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base' ):
        pass
    elif deit_name[4:].startswith('large' ):
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_5_6 / 2_2_4) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 57 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
        'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LiltForQuestionAnswering',
        'LiltForSequenceClassification',
        'LiltForTokenClassification',
        'LiltModel',
        'LiltPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
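# Note (not part of the original file): with this pattern `import transformers.models.lilt`
# stays cheap -- the torch-backed classes listed in _import_structure are only imported on
# first attribute access, via the _LazyModule assigned to sys.modules above.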
| 57 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory (args ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
    """CLI command that scaffolds a new model from the cookiecutter template."""
    @staticmethod
    def register_subcommand ( parser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'''{directory}/configuration.json''' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(directory )
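        # Illustrative marker format consumed by replace_in_files above (a sketch, not
        # taken from the original source):
        #   # To replace in: "src/transformers/__init__.py"
        #   # Below: "    # PyTorch models"
        #   ...lines to copy verbatim...
        #   # End.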
| 57 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')
def get_hash (example ):
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats (example ):
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats (example ):
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques (example , uniques ):
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
def is_autogenerated (example , scan_width=5 ):
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test (example , scan_width=5 , coeff=0.05 ):
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords (example ):
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments (example , minimum=4 ):
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio (example ):
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess (example ):
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter (example , uniques , args ):
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file (file_path ):
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
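# Usage note (illustrative, not in the original): compress_file('data/file-000000000001.json')
# writes 'data/file-000000000001.json.gz' and removes the uncompressed file, matching the
# per-shard save loop at the bottom of this script.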
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 57 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
    """
    Output of the Semantic Stable Diffusion pipeline: the generated images and, optionally,
    per-image flags from the safety checker.
    """
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
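# Usage sketch (illustrative, not part of the original file):
#   out = SemanticStableDiffusionPipelineOutput(images=[pil_img], nsfw_content_detected=[False])
#   out.images[0].save('result.png')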
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 57 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest( unittest.TestCase ):
    """Tests for BarkProcessor."""
    def setUp( self ):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 3_5
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 57 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict (filename ):
    result = {}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
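# Illustrative input/output (not from the original): a labels file containing the lines
# "down" and "up" yields {0: 'down', 1: 'up'}, which the sequence-classification branch
# below assigns to config.id2label.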
def set_recursively (hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.' ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.' ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def rename_dict (key , value , full_name , weight_type , hf_dict ):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split('.' )[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer (name , value , hf_model=None , hf_dict=None ):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split('.' )[-2]
                mapped_key = mapped_key.replace('*' , layer_index )
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
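# Example (illustrative, not in the original file): the fairseq name
# "encoder.layers.3.fc1.weight" matches the "fc1" entry of MAPPING; the "*" in
# "encoder.layers.*.feed_forward.intermediate_dense" is then replaced by the layer
# index "3" extracted from the name.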
def recursively_load_weights (fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer (full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wav2vec2_checkpoint (checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path )
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path )
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config )
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wav2vec = Wav2Vec2ForCTC(config )
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config )
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec , not is_finetuned )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 57 | 1 |
def set_bit (number , position ) -> int:
    return number | (1 << position)
def clear_bit (number , position ) -> int:
    return number & ~(1 << position)
def flip_bit (number , position ) -> int:
    return number ^ (1 << position)
def is_bit_set (number , position ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit (number , position ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester( unittest.TestCase ):
    """Checks that the external metric test script runs on CPU and GPU setups."""
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop( self ):
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def test_metric_cpu_multi( self ):
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def test_metric_gpu( self ):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 57 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_0 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1_0 , _lowerCamelCase=0.0_2 , ):
UpperCamelCase_: Optional[int] = parent
UpperCamelCase_: int = batch_size
UpperCamelCase_: Optional[int] = image_size
UpperCamelCase_: Tuple = patch_size
UpperCamelCase_: int = num_channels
UpperCamelCase_: Union[str, Any] = is_training
UpperCamelCase_: List[str] = use_labels
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: Optional[int] = num_hidden_layers
UpperCamelCase_: int = num_attention_heads
UpperCamelCase_: List[str] = intermediate_size
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: str = hidden_dropout_prob
UpperCamelCase_: int = attention_probs_dropout_prob
UpperCamelCase_: Optional[Any] = type_sequence_label_size
UpperCamelCase_: List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_: Optional[int] = (image_size // patch_size) ** 2
UpperCamelCase_: Union[str, Any] = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config ,pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=3_7 )
def _a ( self ):
self.config_tester.run_common_tests()
def _a ( self ):
UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: int = model_class(_lowerCamelCase )
UpperCamelCase_: int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: Union[str, Any] = [*signature.parameters.keys()]
UpperCamelCase_: Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_: Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
@jax.jit
def model_jitted(_lowerCamelCase , **_lowerCamelCase ):
return model(pixel_values=_lowerCamelCase , **_lowerCamelCase )
with self.subTest('JIT Enabled' ):
UpperCamelCase_: Union[str, Any] = model_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_: Any = model_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase_: str = model_class_name.from_pretrained('google/vit-base-patch16-224' )
UpperCamelCase_: int = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(_lowerCamelCase )
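# A minimal standalone sketch of the JIT inference pattern exercised above,
# using the public checkpoint from the slow test (downloads weights; the
# encoder output is (1, 197, 768): 196 patches + 1 [CLS] token).
if is_flax_available():
    vit = FlaxViTModel.from_pretrained('google/vit-base-patch16-224' )

    @jax.jit
    def vit_forward(pixel_values ):
        return vit(pixel_values=pixel_values ).last_hidden_state

    hidden = vit_forward(np.ones((1, 3, 2_2_4, 2_2_4) , dtype=np.float32 ) )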
| 57 |
import math


class SelfOrganizingMap:
    """A Kohonen self-organizing map with two competing weight vectors."""

    def get_winner(self , weights , sample ):
        # Squared Euclidean distance from the sample to each weight vector
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # The winning vector is the one closest to the sample
        return 0 if da < db else 1

    def update(self , weights , sample , j , alpha ):
        # Move the winning vector towards the sample by learning rate alpha
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'''Clusters that the test sample belongs to : {winner}''' )
    print(F'''Weights that have been trained : {weights}''' )


# running the main() function
if __name__ == "__main__":
    main()
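# Worked check of get_winner for the first training sample above: the squared
# distances are d0 = 0.8**2 + 0.4**2 + 0.5**2 + 0.9**2 = 1.86 and
# d1 = 0.2**2 + 0.6**2 + 0.7**2 + 0.3**2 = 0.98, so the second vector wins.
assert SelfOrganizingMap().get_winner([[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] , [1, 1, 0, 0] ) == 1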
| 57 | 1 |
from __future__ import annotations
def peak(lst: list[int] ) -> int:
    # Bisection on a unimodal list: inspect the middle three elements
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
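    # Quick sanity checks (the input must be strictly unimodal for the
    # bisection above to be valid):
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1] ) == 5
    assert peak([1, 10, 9, 8, 7, 6] ) == 10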
| 57 |
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00454, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid 'from_type' value: {from_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid 'to_type' value: {to_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
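    # Example conversions with the table above:
    print(volume_conversion(4, 'cubicmeter', 'litre'))  # 4 * 1 * 1000 = 4000.0
    print(volume_conversion(1, 'litre', 'gallon'))      # 1 * 0.001 * 264.172 = 0.264172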
| 57 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.0_2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config ,inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids ,decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids ,decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    vocab_size = 9_9
def _a ( self ):
UpperCamelCase_: Union[str, Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
UpperCamelCase_: str = input_ids.shape[0]
UpperCamelCase_: str = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: str = self._get_config_and_data()
UpperCamelCase_: Optional[Any] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase_: Any = lm_model(input_ids=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
UpperCamelCase_: Optional[int] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
UpperCamelCase_: Tuple = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
UpperCamelCase_: Tuple = lm_model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
UpperCamelCase_: Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
UpperCamelCase_: Tuple = shift_tokens_right(_lowerCamelCase , 1 , 2 )
UpperCamelCase_: Union[str, Any] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
UpperCamelCase_: Optional[int] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
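        # Concretely: [[71, 82, 18, 33, 2, 1, 1]] with pad_token_id=1 and
        # decoder_start_token_id=2 becomes [[2, 71, 82, 18, 33, 2, 1]] --
        # the start token is prepended, every token moves one position right,
        # and the final position is dropped (hence one fewer trailing pad).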
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_: Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
return model.encode(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase )
with self.subTest('JIT Enabled' ):
UpperCamelCase_: Optional[int] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_: Dict = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
UpperCamelCase_: Dict = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCamelCase_: List[str] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return model.decode(
decoder_input_ids=_lowerCamelCase , decoder_attention_mask=_lowerCamelCase , encoder_outputs=_lowerCamelCase , )
with self.subTest('JIT Enabled' ):
UpperCamelCase_: List[Any] = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_: Dict = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase_: Dict = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCamelCase_: Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCamelCase_: List[Any] = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def _a ( self ):
UpperCamelCase_: Optional[int] = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
UpperCamelCase_: List[Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
UpperCamelCase_: Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowerCamelCase )
UpperCamelCase_: Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
UpperCamelCase_: int = ['Sam']
UpperCamelCase_: str = tokenizer(_lowerCamelCase , return_tensors='jax' )
UpperCamelCase_: Union[str, Any] = model.generate(**_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: Any = 'Sam is a great name. It means "sun" in Gaelic.'
UpperCamelCase_: Any = tokenizer.batch_decode(_lowerCamelCase , **_lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
| 57 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor( CLIPImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
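# A sketch of what the shim preserves: constructing the deprecated class still
# yields a fully functional image processor, plus a FutureWarning.
import warnings as _warnings

with _warnings.catch_warnings(record=True ) as _caught:
    _warnings.simplefilter('always' )
    _feature_extractor = CLIPFeatureExtractor()
assert isinstance(_feature_extractor , CLIPImageProcessor )
assert any('deprecated' in str(w.message ) for w in _caught )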
| 57 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict ):
    model_state_dict = {}
    state_dict.pop('pixel_mean' , None )
    state_dict.pop('pixel_std' , None )
    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('layers.0' , 'proj_in' )
            elif layer_nb == 1:
                key = key.replace('layers.1' , 'layers.0' )
            elif layer_nb == 2:
                key = key.replace('layers.2' , 'proj_out' )
        model_state_dict[key] = value
    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
UpperCamelCase_: int = hf_hub_download(UpperCAmelCase__ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
UpperCamelCase_: Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
UpperCamelCase_: Union[str, Any] = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
UpperCamelCase_: Dict = SamConfig(
vision_config=UpperCAmelCase__ , )
elif "sam_vit_h" in model_name:
UpperCamelCase_: Any = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
UpperCamelCase_: Optional[Any] = SamConfig(
vision_config=UpperCAmelCase__ , )
UpperCamelCase_: Optional[int] = torch.load(UpperCAmelCase__ , map_location='cpu' )
UpperCamelCase_: Dict = replace_keys(UpperCAmelCase__ )
UpperCamelCase_: Tuple = SamImageProcessor()
UpperCamelCase_: List[str] = SamProcessor(image_processor=UpperCAmelCase__ )
UpperCamelCase_: Dict = SamModel(UpperCAmelCase__ )
hf_model.load_state_dict(UpperCAmelCase__ )
UpperCamelCase_: List[str] = hf_model.to('cuda' )
UpperCamelCase_: int = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
UpperCamelCase_: List[Any] = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert('RGB' )
UpperCamelCase_: int = [[[4_0_0, 6_5_0]]]
UpperCamelCase_: str = [[1]]
UpperCamelCase_: str = processor(images=np.array(UpperCAmelCase__ ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
UpperCamelCase_: List[Any] = hf_model(**UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
UpperCamelCase_: Optional[Any] = processor(
images=np.array(UpperCAmelCase__ ) , input_points=UpperCAmelCase__ , input_labels=UpperCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
UpperCamelCase_: Optional[int] = hf_model(**UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
UpperCamelCase_: Union[str, Any] = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
UpperCamelCase_: List[Any] = processor(images=np.array(UpperCAmelCase__ ) , input_boxes=UpperCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
UpperCamelCase_: Optional[int] = hf_model(**UpperCAmelCase__ )
UpperCamelCase_: str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
UpperCamelCase_: List[Any] = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
UpperCamelCase_: Dict = [[1, 1]]
UpperCamelCase_: Optional[Any] = processor(
images=np.array(UpperCAmelCase__ ) , input_points=UpperCAmelCase__ , input_labels=UpperCAmelCase__ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
UpperCamelCase_: List[Any] = hf_model(**UpperCAmelCase__ )
UpperCamelCase_: str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
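    # Example of the hypernetwork-MLP renaming performed by replace_keys above:
    #   'mask_decoder.output_hypernetworks_mlps.3.layers.2.weight'
    #   -> 'mask_decoder.output_hypernetworks_mlps.3.proj_out.weight'
    # (group(2) of the regex selects the inner layer index: 0 -> proj_in,
    #  1 -> layers.0, 2 -> proj_out; the outer MLP index 3 is untouched).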
| 57 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
    """simple docstring"""
    _optional_components = ['''melgan''']
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 1_2_8
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out ,max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out ,max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded ,tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded ,continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 1_0_0 , _lowerCamelCase = True , _lowerCamelCase = "numpy" , _lowerCamelCase = None , _lowerCamelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase , _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_lowerCamelCase )}.''' )
UpperCamelCase_: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
UpperCamelCase_: str = np.zeros([1, 0, self.n_dims] , np.floataa )
UpperCamelCase_: Dict = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_lowerCamelCase ):
if i == 0:
UpperCamelCase_: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCamelCase_: Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase_: Any = ones
UpperCamelCase_: str = self.scale_features(
_lowerCamelCase , output_range=[-1.0, 1.0] , clip=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_lowerCamelCase , continuous_mask=_lowerCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase_: List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_lowerCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase_: int = self.decode(
encodings_and_masks=_lowerCamelCase , input_tokens=_lowerCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCamelCase_: Tuple = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
UpperCamelCase_: List[Any] = self.scale_to_features(_lowerCamelCase , input_range=[-1.0, 1.0] )
UpperCamelCase_: Any = mel[:1]
UpperCamelCase_: List[str] = mel.cpu().float().numpy()
UpperCamelCase_: Tuple = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase , _lowerCamelCase )
logger.info('Generated segment' , _lowerCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
UpperCamelCase_: int = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase_: int = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_lowerCamelCase )
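# Scalar sketch of the affine min-max mapping implemented by scale_features /
# scale_to_features above; the two are exact inverses of each other.
_MIN , _MAX = math.log(1e-5 ) , 4.0

def _scale(x , min_out=-1.0 , max_out=1.0 ):
    zero_one = (x - _MIN) / (_MAX - _MIN)
    return zero_one * (max_out - min_out) + min_out

def _unscale(y , min_out=-1.0 , max_out=1.0 ):
    zero_one = (y - min_out) / (max_out - min_out)
    return zero_one * (_MAX - _MIN) + _MIN

assert abs(_unscale(_scale(2.0 ) ) - 2.0 ) < 1e-9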
| 57 | 1 |
def alternative_string_arrange(first_str: str , second_str: str ) -> str:
    first_str_length: int = len(first_str )
    second_str_length: int = len(second_str )
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
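    # One more interleaving, with the longer string first: once the shorter
    # string is exhausted, the remainder is appended as-is.
    print(alternative_string_arrange('ABCDE', 'XY'))  # AXBYCDE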
| 57 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
"""simple docstring"""
a : str =","
a : Optional[str] =None
a : Optional[Union[int, List[int], str]] ="infer"
a : Optional[List[str]] =None
a : Optional[List[str]] =None
a : Optional[Union[int, str, List[int], List[str]]] =None
a : Optional[Union[List[int], List[str]]] =None
a : Optional[str] =None
a : bool =True
a : Optional[Literal["c", "python", "pyarrow"]] =None
a : Dict[Union[int, str], Callable[[Any], Any]] =None
a : Optional[list] =None
a : Optional[list] =None
a : bool =False
a : Optional[Union[int, List[int]]] =None
a : Optional[int] =None
a : Optional[Union[str, List[str]]] =None
a : bool =True
a : bool =True
a : bool =False
a : bool =True
a : Optional[str] =None
a : str ="."
a : Optional[str] =None
a : str ='"'
a : int =0
a : Optional[str] =None
a : Optional[str] =None
a : Optional[str] =None
a : Optional[str] =None
a : bool =True
a : bool =True
a : int =0
a : bool =True
a : bool =False
a : Optional[str] =None
a : int =10000
a : Optional[datasets.Features] =None
a : Optional[str] ="strict"
a : Literal["error", "warn", "skip"] ="error"
a : Optional[str] =None
    def __post_init__( self ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table( self , pa_table ):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file '{file}' with error {type(e )}: {e}''' )
                raise
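# In practice this builder is reached through `load_dataset`; any CsvConfig
# field can be overridden as a keyword argument (file names are placeholders):
#
#   from datasets import load_dataset
#   ds = load_dataset('csv', data_files={'train': 'train.csv'}, sep=';')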
| 57 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_ ,UpperCamelCase_: List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_lowerCamelCase , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_: List[str] = controlnet_params
UpperCamelCase_: Any = 'bird'
UpperCamelCase_: Optional[int] = jax.device_count()
UpperCamelCase_: str = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase_: List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
UpperCamelCase_: Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCamelCase_: Tuple = jax.random.PRNGKey(0 )
UpperCamelCase_: Optional[Any] = jax.random.split(_lowerCamelCase , jax.device_count() )
UpperCamelCase_: Optional[Any] = replicate(_lowerCamelCase )
UpperCamelCase_: List[Any] = shard(_lowerCamelCase )
UpperCamelCase_: List[Any] = shard(_lowerCamelCase )
UpperCamelCase_: Tuple = pipe(
prompt_ids=_lowerCamelCase , image=_lowerCamelCase , params=_lowerCamelCase , prng_seed=_lowerCamelCase , num_inference_steps=5_0 , jit=_lowerCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCamelCase_: Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: str = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_: int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: Any = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_ ,UpperCamelCase_: List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_lowerCamelCase , from_pt=_lowerCamelCase , dtype=jnp.bfloataa )
UpperCamelCase_: str = controlnet_params
UpperCamelCase_: Any = 'Chef in the kitchen'
UpperCamelCase_: Any = jax.device_count()
UpperCamelCase_: List[str] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase_: List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
UpperCamelCase_: Any = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCamelCase_: Optional[Any] = jax.random.PRNGKey(0 )
UpperCamelCase_: List[str] = jax.random.split(_lowerCamelCase , jax.device_count() )
UpperCamelCase_: Union[str, Any] = replicate(_lowerCamelCase )
UpperCamelCase_: List[str] = shard(_lowerCamelCase )
UpperCamelCase_: List[str] = shard(_lowerCamelCase )
UpperCamelCase_: List[Any] = pipe(
prompt_ids=_lowerCamelCase , image=_lowerCamelCase , params=_lowerCamelCase , prng_seed=_lowerCamelCase , num_inference_steps=5_0 , jit=_lowerCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCamelCase_: Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_: str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: Any = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 57 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''open-llama'''
    def __init__( self , vocab_size=1_0_0_0_0_0 , hidden_size=4_0_9_6 , intermediate_size=1_1_0_0_8 , num_hidden_layers=3_2 , num_attention_heads=3_2 , hidden_act="silu" , max_position_embeddings=2_0_4_8 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}''' )
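# Examples against the validator above:
OpenLlamaConfig(rope_scaling={'type': 'dynamic', 'factor': 2.0} )   # accepted
# OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 0.5} )  # ValueError: factor must be > 1
# OpenLlamaConfig(rope_scaling={'type': 'exotic', 'factor': 2.0} )  # ValueError: unknown type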
| 57 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        size = size if size is not None else {'shortest_edge': 2_0}
        crop_size = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
def _a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'crop_size' ) )
def _a ( self ):
UpperCamelCase_: Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _a ( self ):
pass
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_: List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase_: Tuple = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase_: Optional[Any] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_: int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase_: Dict = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
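# Hedged usage sketch of the processor exercised above (the resize/crop kwargs
# mirror the tester defaults; the output shape follows the test assertions).
if is_torch_available() and is_vision_available():
    _proc = MobileNetVaImageProcessor(size={'shortest_edge': 2_0} , crop_size={'height': 1_8, 'width': 1_8} )
    _img = Image.fromarray(np.zeros((3_0, 3_0, 3) , dtype=np.uint8 ) )
    _pixel_values = _proc(_img , return_tensors='pt' ).pixel_values  # (1, 3, 18, 18)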
| 57 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=1_6 , _lowerCamelCase=[3_2, 6_4, 1_2_8] , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=1_0 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2"] , _lowerCamelCase=[1, 2] , ):
UpperCamelCase_: Tuple = parent
UpperCamelCase_: Dict = batch_size
UpperCamelCase_: List[str] = image_size
UpperCamelCase_: Tuple = patch_size
UpperCamelCase_: Tuple = num_channels
UpperCamelCase_: Dict = embed_dim
UpperCamelCase_: List[Any] = hidden_sizes
UpperCamelCase_: List[str] = depths
UpperCamelCase_: List[str] = num_heads
UpperCamelCase_: Optional[int] = window_size
UpperCamelCase_: Tuple = mlp_ratio
UpperCamelCase_: Dict = qkv_bias
UpperCamelCase_: str = hidden_dropout_prob
UpperCamelCase_: Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_: int = drop_path_rate
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: List[str] = use_absolute_embeddings
UpperCamelCase_: Dict = patch_norm
UpperCamelCase_: Optional[Any] = layer_norm_eps
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: List[Any] = is_training
UpperCamelCase_: Optional[int] = scope
UpperCamelCase_: str = use_labels
UpperCamelCase_: List[str] = type_sequence_label_size
UpperCamelCase_: Union[str, Any] = encoder_stride
UpperCamelCase_: Dict = out_features
UpperCamelCase_: str = out_indices
def _a ( self ):
UpperCamelCase_: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_: List[Any] = None
if self.use_labels:
UpperCamelCase_: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Optional[int] = FocalNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: int = model(_lowerCamelCase )
UpperCamelCase_: int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase_: int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
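        # e.g. with the defaults above (image_size=32, patch_size=2, depths=[1, 2, 1]):
        # (32 // 2) ** 2 // 4 ** 2 = 16 patches, and a final dim of 16 * 2 ** 2 = 64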
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[str] = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[int] = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase_: int = None
UpperCamelCase_: List[Any] = FocalNetBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Any = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = FocalNetForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Any = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase_: List[Any] = 1
UpperCamelCase_: Dict = FocalNetForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = self.type_sequence_label_size
UpperCamelCase_: List[Any] = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: Dict = FocalNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_: Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_: Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ):
UpperCamelCase_: Dict = self.prepare_config_and_inputs()
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: List[str] = config_and_inputs
UpperCamelCase_: int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a : Any =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
a : Dict =False
a : Union[str, Any] =False
a : Tuple =False
a : Optional[int] =False
a : Union[str, Any] =False
def _a ( self ):
UpperCamelCase_: str = FocalNetModelTester(self )
UpperCamelCase_: Tuple = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=3_7 , has_text_modality=_lowerCamelCase )
def _a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
return
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _a ( self ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _a ( self ):
pass
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_: List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: List[Any] = model_class(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: Any = [*signature.parameters.keys()]
UpperCamelCase_: List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_: Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_: Union[str, Any] = outputs.hidden_states
UpperCamelCase_: Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# FocalNet has a different seq_length
UpperCamelCase_: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase_: Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: int = reshaped_hidden_states[0].shape
UpperCamelCase_: List[str] = (
reshaped_hidden_states[0].view(_lowerCamelCase , _lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: str = 3
UpperCamelCase_: Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase_: int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase_: List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase_: str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase_: Dict = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_: Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@slow
def _a ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: List[Any] = FocalNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: Dict = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
UpperCamelCase_: List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def _a ( self ):
UpperCamelCase_: Optional[int] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = self.default_image_processor
UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase_: str = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_: List[str] = model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_: Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_: Optional[int] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =(FocalNetBackbone,) if is_torch_available() else ()
a : List[str] =FocalNetConfig
a : List[str] =False
def _a ( self ):
UpperCamelCase_: Any = FocalNetModelTester(self )
| 57 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
A_ : str = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Tuple = spec.loader.load_module()
A_ : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Any = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
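# Illustrative example of the pattern above (hypothetical input string):
#   _re_checkpoint.findall('[bert-base-uncased](https://huggingface.co/bert-base-uncased)')
#   -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]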
A_ : Optional[int] = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def snake_case () -> Optional[Any]:
UpperCamelCase_: str = []
for config_class in list(CONFIG_MAPPING.values() ):
UpperCamelCase_: int = False
# source code of `config_class`
UpperCamelCase_: int = inspect.getsource(UpperCAmelCase__ )
UpperCamelCase_: List[Any] = _re_checkpoint.findall(UpperCAmelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
UpperCamelCase_ ,UpperCamelCase_: int = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase_: List[str] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
UpperCamelCase_: Optional[Any] = True
break
UpperCamelCase_: Tuple = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
UpperCamelCase_: List[Any] = '\n'.join(sorted(UpperCAmelCase__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 57 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
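# Usage note (illustrative): with the `_LazyModule` indirection above, an
# import such as `from transformers import DistilBertModel` only loads the
# torch-backed implementation the first time the attribute is accessed.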
| 57 | 1 |
def solution(n: int = 1_0_0_0) -> int:
    """Sum of the multiples of 3 or 5 below `n` (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        # multiples of 15 already satisfy `a % 3 == 0`, so the original
        # `elif a % 15 == 0: result -= a` branch was unreachable dead code
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
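# For comparison, a closed-form sketch (illustrative, not part of the original
# solution): by inclusion-exclusion the answer is S(3) + S(5) - S(15), where
# S(k) is the arithmetic-series sum of the multiples of k below n.
def solution_closed_form(n: int = 1_0_0_0) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(1_5)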
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def snake_case (UpperCAmelCase__ ) -> Union[str, Any]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(UpperCAmelCase__ , '_dynamo' ):
return False
return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = True ) -> Any:
UpperCamelCase_: Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase_: int = is_compiled_module(UpperCAmelCase__ )
if is_compiled:
UpperCamelCase_: List[str] = model
UpperCamelCase_: Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase_: Dict = model.module
if not keep_fpaa_wrapper:
UpperCamelCase_: int = getattr(UpperCAmelCase__ , 'forward' )
UpperCamelCase_: List[str] = model.__dict__.pop('_original_forward' , UpperCAmelCase__ )
if original_forward is not None:
while hasattr(UpperCAmelCase__ , '__wrapped__' ):
UpperCamelCase_: Any = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase_: Optional[int] = forward
if getattr(UpperCAmelCase__ , '_converted_to_transformer_engine' , UpperCAmelCase__ ):
convert_model(UpperCAmelCase__ , to_transformer_engine=UpperCAmelCase__ )
if is_compiled:
UpperCamelCase_: Union[str, Any] = model
UpperCamelCase_: Tuple = compiled_model
return model
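# Usage sketch (illustrative): after `accelerator.prepare(model)`, the helper
# above peels DDP / DeepSpeed / torch.compile wrappers off the model so the
# original module can be recovered, e.g. before saving a plain state dict.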
def snake_case () -> List[str]:
PartialState().wait_for_everyone()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCAmelCase__ , UpperCAmelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
@contextmanager
def snake_case (**UpperCAmelCase__ ) -> Any:
for key, value in kwargs.items():
UpperCamelCase_: int = str(UpperCAmelCase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def snake_case (UpperCAmelCase__ ) -> str:
if not hasattr(UpperCAmelCase__ , '__qualname__' ) and not hasattr(UpperCAmelCase__ , '__name__' ):
UpperCamelCase_: List[Any] = getattr(UpperCAmelCase__ , '__class__' , UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(UpperCAmelCase__ , '__name__' ):
return obj.__name__
return str(UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
for key, value in source.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase_: Any = destination.setdefault(UpperCAmelCase__ , {} )
merge_dicts(UpperCAmelCase__ , UpperCAmelCase__ )
else:
UpperCamelCase_: str = value
return destination
def snake_case (UpperCAmelCase__ = None ) -> bool:
if port is None:
UpperCamelCase_: List[str] = 2_9_5_0_0
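    # connect_ex returns 0 when the connection succeeds, i.e. when another
    # process is already listening on the given port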
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 57 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Tuple = 32
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = 1_6 , UpperCAmelCase__ = "bert-base-cased" ) -> Optional[int]:
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
UpperCamelCase_: int = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_: int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase_: Union[str, Any] = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase_: Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCamelCase_: Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
UpperCamelCase_: List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
model.eval()
UpperCamelCase_: Any = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_: Dict = model(**UpperCAmelCase__ )
UpperCamelCase_: List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase_ ,UpperCamelCase_: Dict = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
UpperCamelCase_: List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase_: Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
UpperCamelCase_: Any = metric.compute()
return eval_metric["accuracy"]
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
# Initialize accelerator
UpperCamelCase_: Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_: int = config['lr']
UpperCamelCase_: List[Any] = int(config['num_epochs'] )
UpperCamelCase_: Dict = int(config['seed'] )
UpperCamelCase_: Optional[Any] = int(config['batch_size'] )
UpperCamelCase_: int = args.model_name_or_path
set_seed(UpperCAmelCase__ )
UpperCamelCase_ ,UpperCamelCase_: List[str] = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_: Dict = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
UpperCamelCase_: List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase_: Any = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase_: Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCamelCase_: List[Any] = 1
UpperCamelCase_: List[Any] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
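    # e.g. 100 batches per epoch * 2 epochs with gradient_accumulation_steps=2
    # yields 100 optimizer updates in total (illustrative numbers)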
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase_: Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
UpperCamelCase_: Union[str, Any] = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[int] = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase_: Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase_: str = 0
UpperCamelCase_: int = evaluate.load('glue' , 'mrpc' )
UpperCamelCase_: int = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase_: Union[str, Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase_: Union[str, Any] = args.resume_from_checkpoint.split('epoch_' )[1]
UpperCamelCase_: Tuple = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase_: Optional[int] = int(UpperCAmelCase__ ) + 1
UpperCamelCase_: str = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
    accelerator.print('resumed optimizer\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
UpperCamelCase_: Dict = json.load(UpperCAmelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase_: Any = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: str = model(**UpperCAmelCase__ )
UpperCamelCase_: List[str] = outputs.loss
UpperCamelCase_: str = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase_: Tuple = F'''epoch_{epoch}'''
UpperCamelCase_: Union[str, Any] = os.path.join(args.output_dir , UpperCAmelCase__ )
accelerator.save_state(UpperCAmelCase__ )
UpperCamelCase_: List[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = accuracy
UpperCamelCase_: List[str] = lr_scheduler.get_lr()[0]
UpperCamelCase_: Optional[Any] = optimizer.param_groups[0]['lr']
UpperCamelCase_: Dict = epoch
UpperCamelCase_: Optional[int] = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case () -> Optional[Any]:
UpperCamelCase_: Optional[int] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase__ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase__ , default=2 , help='Number of train epochs.' , )
UpperCamelCase_: str = parser.parse_args()
UpperCamelCase_: Dict = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
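# Launch sketch (hypothetical script name and flags shown for illustration):
# this example is meant to be run under `accelerate launch`, typically with a
# DeepSpeed config, e.g.
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 2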
| 57 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCAmelCase__ , 'rb' ) as fp:
UpperCamelCase_: Union[str, Any] = pickle.load(UpperCAmelCase__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase_: Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
UpperCamelCase_: Union[str, Any] = corpus.vocab.__dict__
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: str = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase__ )
UpperCamelCase_: str = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase_: Any = os.path.abspath(UpperCAmelCase__ )
UpperCamelCase_: Dict = os.path.abspath(UpperCAmelCase__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase_: List[str] = TransfoXLConfig()
else:
UpperCamelCase_: Optional[int] = TransfoXLConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Union[str, Any] = TransfoXLLMHeadModel(UpperCAmelCase__ )
UpperCamelCase_: Tuple = load_tf_weights_in_transfo_xl(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
UpperCamelCase_: str = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase__ )}''' )
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
A_ : Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
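# Example invocation (hypothetical paths):
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --transfo_xl_dataset_file ./cache/corpus-info.pkl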
| 57 | 1 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of `img` with its contrast adjusted by `level`."""
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c: int) -> int:
        # linear remap of each channel value around mid-gray (128)
        return int(1_2_8 + factor * (c - 1_2_8))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
A_ : List[Any] = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
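# Note (illustrative): mid-gray is a fixed point of the mapping above, so with
# level=170 the factor is 259 * 425 / (255 * 89) ≈ 4.85 and contrast(128) == 128.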
| 57 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : List[str] = logging.get_logger(__name__)
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
UpperCamelCase_: Tuple = b.T
UpperCamelCase_: Tuple = np.sum(np.square(UpperCAmelCase__ ) , axis=1 )
UpperCamelCase_: Optional[Any] = np.sum(np.square(UpperCAmelCase__ ) , axis=0 )
UpperCamelCase_: Optional[int] = np.matmul(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: List[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
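# Worked example of the expansion used above (illustrative): for a = (3, 4, 0)
# and b = (1, 2, 2), ||a||^2 - 2 a.b + ||b||^2 = 25 - 22 + 9 = 12, which matches
# (3 - 1)^2 + (4 - 2)^2 + (0 - 2)^2 computed directly.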
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
UpperCamelCase_: List[str] = x.reshape(-1 , 3 )
UpperCamelCase_: Union[str, Any] = squared_euclidean_distance(UpperCAmelCase__ , UpperCAmelCase__ )
return np.argmin(UpperCAmelCase__ , axis=1 )
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Any =['''pixel_values''']
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: List[str] = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
UpperCamelCase_: str = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = np.array(_lowerCamelCase ) if clusters is not None else None
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: List[Any] = size
UpperCamelCase_: Optional[int] = resample
UpperCamelCase_: str = do_normalize
UpperCamelCase_: str = do_color_quantize
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCamelCase , size=(size['height'], size['width']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , ):
UpperCamelCase_: Optional[Any] = rescale(image=_lowerCamelCase , scale=1 / 1_2_7.5 , data_format=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = image - 1
return image
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Tuple = size if size is not None else self.size
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = resample if resample is not None else self.resample
UpperCamelCase_: Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase_: Dict = clusters if clusters is not None else self.clusters
UpperCamelCase_: Dict = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[int] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: Union[str, Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase ) for image in images]
if do_color_quantize:
UpperCamelCase_: Any = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase_: Optional[Any] = np.array(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase_: Dict = images.shape[0]
UpperCamelCase_: Any = images.reshape(_lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase_: List[Any] = list(_lowerCamelCase )
else:
UpperCamelCase_: int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: str = {'input_ids': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
| 57 | 1 |