| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file hosted in a dataset repository on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
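
# Illustrative usage (the dataset name and file path below are hypothetical):
#   hf_hub_url("user/my-dataset", "data/train.csv")
#   -> "https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv"
# The revision defaults to "main" when not given.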
---
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges.

    The final partition absorbs any remainder so the ranges cover all bytes.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
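
# Worked example: allocation_num(10, 3) returns ["1-3", "4-6", "7-10"];
# 10 // 3 == 3 bytes per partition, and the last range stretches to byte 10
# to absorb the remainder when the byte count is not evenly divisible.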
---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
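
# With the lazy module installed in sys.modules, downstream code imports names as
# usual, e.g. `from transformers.models.owlvit import OwlViTConfig, OwlViTProcessor`,
# and each heavy submodule is only imported on first attribute access.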
---
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset of unpredictable length: each step yields the next
    # integer and stops with probability `p_stop`, or after `max_length` items.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
---
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, usually 0.04 or 0.06
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
---
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # The padded runs of the original flat literal are rewritten with list
        # arithmetic (58100 is the pad id); the token values are unchanged.
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],  # noqa: E501
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,  # noqa: E501
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
---
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


# modeled after fairseq's Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a '<token> <count>' text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
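
# Example invocation (the script name and both paths are illustrative):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
# The checkpoint dir is expected to contain checkpoint.pt, dict.txt and bpecodes.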
---
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string (as bytes) to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative 32-bit integer to little-endian hexadecimal (as bytes)."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 hex digest of `message`, as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round left-rotation amounts (16 each for the four rounds)
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
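
# Sanity check against the RFC 1321 test vector for the empty message
# (md5_me returns the hex digest as bytes):
#   assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"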
---
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"


# NOTE: the left-hand target attribute paths below were lost in this copy and are
# reconstructed to match the diffusers SpectrogramDiffusion module layout; the T5X
# weight names on the right come from the original checkpoint.
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model


def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
---
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file hosted in a dataset repository on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
---
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image, using the global mean intensity as the threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
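
# Illustrative behaviour: if the global mean intensity of the grayscale input
# is, say, 128, every pixel strictly above 128 is set to 255 and every other
# pixel to 0, producing a pure black-and-white image.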
---
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    MAX_NEW_TOKENS = 10
    def setUp( self ) -> None:
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
    def setUp( self ) -> None:
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 1 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    """simple docstring"""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        # secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x : float ) -> float:
    """simple docstring"""
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 56 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
    def setUp( self ) -> None:
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 16000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '\n' )
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 1 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , 'rb' ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('version' ):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
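# Usage sketch (hedged): the checkpoint path below is illustrative only.
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model , '/path/to/flax_model.msgpack' )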
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = ''
    flax_state_dict = flatten_dict(flax_state , sep='.' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('_0' , '.0' )
                    .replace('_1' , '.1' )
                    .replace('_2' , '.2' )
                    .replace('_3' , '.3' )
                    .replace('_4' , '.4' )
                    .replace('_5' , '.5' )
                    .replace('_6' , '.6' )
                    .replace('_7' , '.7' )
                    .replace('_8' , '.8' )
                    .replace('_9' , '.9' )
                )
        flax_key = '.'.join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
    if len(missing_keys ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
return pt_model
| 56 |
'''simple docstring'''
def power(base : int , exponent : int ) -> float:
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
    names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 56 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    """simple docstring"""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        # secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / ((f(x_{n+1}) - f(x_n)) / (x_{n+1} - x_n))
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x : float ) -> float:
    """simple docstring"""
    return math.pow(x , 3 ) - (2 * x) - 5
| 56 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor : Union[tf.Tensor, np.ndarray] ) -> List[int]:
    """simple docstring"""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax(logits : tf.Tensor , axis : Optional[int] = None , name : Optional[str] = None ) -> tf.Tensor:
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
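# Usage sketch (hedged): the 1e-9 shift is numerically negligible, so this matches
# tf.nn.softmax up to floating-point noise, e.g.
#   probs = stable_softmax(tf.constant([[1.0, 2.0, 3.0]] ) , axis=-1 )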
def functional_layernorm(inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    """simple docstring"""
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
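# Usage sketch (hedged): with unit weight and zero bias this mirrors
# torch.nn.functional.layer_norm over the last axis, e.g.
#   out = functional_layernorm(tf.random.normal((2, 4) ) , tf.ones(4 ) , tf.zeros(4 ) )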
def flatten(input , start_dim=0 , end_dim=-1 ):
    """simple docstring"""
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
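# Usage sketch (hedged): flattening dims 1..2 of a (2, 3, 4) tensor gives shape (2, 12), e.g.
#   flat = flatten(tf.zeros((2, 3, 4) ) , start_dim=1 )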
def invert_attention_mask(encoder_attention_mask : tf.Tensor ) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor : tf.Tensor , embed_dim : int , tensor_name : str = "input_ids" ) -> None:
    """simple docstring"""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ) , )
def save_attributes_to_hdf5_group(group , name , data ):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group , name ):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode('utf8' ) if hasattr(n , 'decode' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8' ) if hasattr(n , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d(data ):
    """simple docstring"""
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 56 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
    def setUp( self ) -> None:
super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 56 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_a , _a , _a : Union[str, Any] = False, False, False
@dataclass
class Audio:
    sampling_rate : Optional[int] = None
    mono : bool = True
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str = field(default="Audio" , init=False , repr=False )
def __call__( self : Optional[Any] ) -> Tuple:
return self.pa_type
    def encode_example( self , value : Union[str, bytes, dict] ) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['bytes'] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example( self , value : dict , token_per_repo_id : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
        path , file = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
        if path is None and file is None:
            raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('::' )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['repo_id']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , 'rb' , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
    def cast_storage( self , storage : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage : pa.StructArray ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , 'rb' ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
| 56 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation : list ) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
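    # Hedged demo: the postfix expression "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))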
| 56 | 1 |
'''simple docstring'''
def snake_to_camel_case(input_str : str , use_pascal : bool = False ) -> str:
    """simple docstring"""
    if not isinstance(input_str , str ):
        msg = f'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split('_' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
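    # Hedged demo: default camelCase keeps the first word lowercase;
    # use_pascal=True would yield 'SomeRandomString'
    print(snake_to_camel_case("some_random_string"))  # -> someRandomString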
| 56 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows : int , cols : int , mat : list[list[int]] ) -> int:
    """simple docstring"""
    def update_area_of_max_square(row : int , col : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows : int , cols : int , mat : list[list[int]] ) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row : int , col : int , dp_array : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows : int , cols : int , mat : list[list[int]] ) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows : int , cols : int , mat : list[list[int]] ) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_a : Optional[int] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
_a : List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
_a : List[str] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[Any] = SqueezeBertTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[Any]="[UNK]" , SCREAMING_SNAKE_CASE_ : Tuple="[SEP]" , SCREAMING_SNAKE_CASE_ : List[Any]="[PAD]" , SCREAMING_SNAKE_CASE_ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE_ : Tuple="[MASK]" , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Union[str, Any]:
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
__snake_case = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
__snake_case = do_lower_case
__snake_case = strip_accents
__snake_case = tokenize_chinese_chars
__snake_case = normalizer_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = do_lower_case
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=None ) -> List[str]:
__snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
__snake_case = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 56 |
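# --- Editor's sketch (assumption: the anonymized class above is upstream
# SqueezeBertTokenizerFast, judging by the vocab/tokenizer URLs). A typical
# round trip against an installed `transformers` (needs network or cache):
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("hello world")
assert enc["input_ids"][0] == tok.cls_token_id  # [CLS] ... [SEP] framing
assert enc["input_ids"][-1] == tok.sep_token_id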
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 1 |
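# --- Editor's sketch (not part of the dataset row above) ---
# The pattern every fixture above repeats: build an artifact once per test
# session with `tmp_path_factory` and hand its path to any test requesting
# it. Minimal self-contained version (names are illustrative):
import pytest

@pytest.fixture(scope="session")
def greeting_file(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "greeting.txt"
    path.write_text("hello\n")
    return str(path)

def test_greeting(greeting_file):
    with open(greeting_file) as f:
        assert f.read() == "hello\n"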
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_a : Union[str, Any] = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = ["PerceiverFeatureExtractor"]
_a : str = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
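# --- Editor's sketch (not part of the dataset row above) ---
# The guard pattern the module repeats: probe each optional backend and
# register the heavy names only when it is importable. Simplified; the real
# _LazyModule additionally defers the import until an attribute is touched.
import importlib.util

def backend_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

_import_structure = {"configuration": ["Config"]}
if backend_available("torch"):
    _import_structure["modeling"] = ["Model"]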
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "camembert"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0522 , SCREAMING_SNAKE_CASE_ : str=768 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=512 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=0.0_2 , SCREAMING_SNAKE_CASE_ : Tuple=1e-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Dict="absolute" , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class _lowercase ( __lowercase ):
@property
def a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 56 | 1 |
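# --- Editor's sketch (assumption: the anonymized config class above is
# upstream CamembertConfig, per the "camembert" model_type and checkpoint
# URLs). Constructor kwargs simply override the defaults set in __init__:
from transformers import CamembertConfig

config = CamembertConfig(num_hidden_layers=6, hidden_size=384)
assert config.num_hidden_layers == 6 and config.hidden_size == 384
assert config.model_type == "camembert"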
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a cast to float
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a cast to half
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 |
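# --- Editor's sketch (not part of the dataset row above) ---
# The loading pattern the tests exercise, de-anonymized. Requires a CUDA
# GPU plus the `bitsandbytes` and `accelerate` packages; the checkpoint id
# is taken from the row, max_new_tokens is illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))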
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "timesformer"
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str]=224 , SCREAMING_SNAKE_CASE_ : List[str]=16 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=8 , SCREAMING_SNAKE_CASE_ : Tuple=768 , SCREAMING_SNAKE_CASE_ : int=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE_ : Any=1e-6 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]="divided_space_time" , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = num_frames
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = qkv_bias
__snake_case = attention_type
__snake_case = drop_path_rate
| 56 | 1 |
'''simple docstring'''
def _a (lowercase__ : int = 3 , lowercase__ : int = 7 , lowercase__ : int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__snake_case = 0
__snake_case = 1
for current_denominator in range(1 , limit + 1 ):
__snake_case = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__snake_case = current_numerator
__snake_case = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 56 |
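# --- Editor's sketch (not part of the dataset row above) ---
# The same search with exact Fraction arithmetic instead of the
# cross-multiplication test, as a cross-check on a small limit. For q <= 8
# the fraction immediately below 3/7 is 2/5 (the worked example from
# Project Euler problem 71).
from fractions import Fraction

def closest_below(numerator: int = 3, denominator: int = 7, limit: int = 8) -> Fraction:
    target = Fraction(numerator, denominator)
    best = Fraction(0)
    for q in range(1, limit + 1):
        p = q * numerator // denominator
        candidate = Fraction(p, q)
        if candidate == target:
            candidate = Fraction(p - 1, q)
        if candidate > best:
            best = candidate
    return best

assert closest_below() == Fraction(2, 5)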
'''simple docstring'''
from typing import Any
class _lowercase :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
__snake_case = data
__snake_case = None
class _lowercase :
def __init__( self : List[Any] ) -> Tuple:
__snake_case = None
def a ( self : int ) -> Union[str, Any]:
__snake_case = self.head
while temp is not None:
print(temp.data , end=' ' )
__snake_case = temp.next
print()
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
__snake_case = Node(SCREAMING_SNAKE_CASE_ )
__snake_case = self.head
__snake_case = new_node
def a ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
if node_data_a == node_data_a:
return
else:
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
if node_a is None or node_a is None:
return
__snake_case , __snake_case = node_a.data, node_a.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 1 |
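# --- Editor's note (not part of the dataset row above) ---
# swap_nodes exchanges the *data* payloads of the two matched nodes rather
# than relinking them: two linear scans find the nodes, then one tuple
# assignment swaps the values, avoiding the head/adjacent-node edge cases
# of real pointer surgery. The idea in isolation (fresh stand-in class):
class _Node:
    def __init__(self, data):
        self.data = data

a, b = _Node(1), _Node(4)
a.data, b.data = b.data, a.data
assert (a.data, b.data) == (4, 1)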
'''simple docstring'''
def _a (lowercase__ : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case = len(lowercase__ )
for i in range(length - 1 ):
__snake_case = i
for k in range(i + 1 , lowercase__ ):
if collection[k] < collection[least]:
__snake_case = k
if least != i:
__snake_case , __snake_case = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_a : Any = input("Enter numbers separated by a comma:\n").strip()
_a : Optional[Any] = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 56 |
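# --- Editor's sketch (not part of the dataset row above) ---
# A quick property check for the sort: it should agree with sorted() on
# random input. The row anonymizes its function name, so a fresh copy of
# the same algorithm is inlined here.
import random

def selection_sort(collection: list) -> list:
    for i in range(len(collection) - 1):
        least = i
        for k in range(i + 1, len(collection)):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = collection[least], collection[i]
    return collection

data = [random.randint(-50, 50) for _ in range(20)]
assert selection_sort(data[:]) == sorted(data)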
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 1 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Dict = PriorTransformer
_SCREAMING_SNAKE_CASE : Dict = "hidden_states"
@property
def a ( self : Optional[Any] ) -> List[Any]:
__snake_case = 4
__snake_case = 8
__snake_case = 7
__snake_case = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = floats_tensor((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Dict:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = 4
__snake_case = 8
__snake_case = 7
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def a ( self : Tuple ) -> Any:
return (4, 8)
@property
def a ( self : Tuple ) -> Any:
return (4, 8)
def a ( self : List[Any] ) -> List[Any]:
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : List[str] ) -> int:
__snake_case , __snake_case = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def a ( self : int ) -> Any:
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] ) -> Dict:
__snake_case = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__snake_case = self.get_dummy_seed_input()
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE_ )[0]
__snake_case = output[0, :5].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__snake_case = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : List[Any]=768 , SCREAMING_SNAKE_CASE_ : int=77 , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Any:
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = batch_size
__snake_case = embedding_dim
__snake_case = num_embeddings
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = torch.randn((batch_size, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(SCREAMING_SNAKE_CASE_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_dummy_seed_input(seed=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE_ )[0]
assert list(sample.shape ) == [1, 768]
__snake_case = sample[0, :8].flatten().cpu()
print(SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
| 56 |
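# --- Editor's sketch (not part of the dataset row above) ---
# Why the dummy-input helpers pin the RNG: with the same manual seed,
# torch's samplers reproduce bit-identical tensors, which is what makes the
# hard-coded expected slices in these tests stable across reruns.
import torch

torch.manual_seed(0)
a = torch.randn(2, 3)
torch.manual_seed(0)
b = torch.randn(2, 3)
assert torch.equal(a, b)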
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For simplicity we skip a real loss
# function and backprop on the mean difference to a random target instead
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For simplicity we skip a real loss
# function and backprop on the mean difference to a random target instead
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
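# Shape note (illustrative, not from the original test): the SD VAE downsamples by a
# factor of 8 per spatial dim, so a (4, 3, 512, 512) image encodes to (4, 4, 64, 64)
# latents, which is exactly what the shape assertion above checks.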
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : List[str] = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_a : int = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ["DPTFeatureExtractor"]
_a : List[str] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a : Optional[Any] = 100
_a : Dict = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
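# Note (added for clarity): the loop above is an inline Sieve of Eratosthenes over the
# odd numbers plus 2 -- for each surviving prime, every multiple from prime * prime
# upward is removed, leaving only the primes below NUM_PRIMES in the set.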
@lru_cache(maxsize=1_0_0 )
def _a (lowercase__ : int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__snake_case = set()
__snake_case = 42
__snake_case = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
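# Worked example (illustrative, not part of the original): partition(10) maps the five
# prime partitions of 10 to the products of their parts:
# 2+2+2+2+2 -> 32, 2+2+3+3 -> 36, 2+3+5 -> 30, 3+7 -> 21, 5+5 -> 25
# so len(partition(10)) == 5, i.e. 10 can be written as a sum of primes in five ways.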
def _a (lowercase__ : int = 5_0_0_0 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , lowercase__ ):
if len(partition(lowercase__ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : int = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = "vivit"
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=224 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : List[Any]=[2, 16, 16] , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=768 , SCREAMING_SNAKE_CASE_ : Any=12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3072 , SCREAMING_SNAKE_CASE_ : List[str]="gelu_fast" , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : int=0.0 , SCREAMING_SNAKE_CASE_ : int=0.0_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1e-06 , SCREAMING_SNAKE_CASE_ : int=True , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[int]:
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = num_frames
__snake_case = tubelet_size
__snake_case = num_channels
__snake_case = qkv_bias
super().__init__(**SCREAMING_SNAKE_CASE_ )
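# Derived shape note (assuming the defaults above): a 32-frame 224x224 clip with
# tubelet_size [2, 16, 16] is embedded into (32 / 2) * (224 / 16) ** 2 = 16 * 196 = 3136
# video tokens before the transformer layers.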
| 56 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
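# Typical invocation, assuming the standard `accelerate` CLI entry point:
# accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# answers the interactive prompts and writes the resulting YAML to the given path.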
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
from __future__ import annotations
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
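# Illustrative trace (not in the original): for number = 97, sqrt(97) ~ 9.85, so the
# loop only tries i = 5; since 97 % 5 != 0 and 97 % 7 != 0, 97 is reported prime after
# a single iteration.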
_a : Dict = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def _a (lowercase__ : int ) -> list[int]:
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be > 0' )
__snake_case = []
for num in range(len(lowercase__ ) ):
__snake_case = 0
while 2 * i * i <= odd_composites[num]:
__snake_case = odd_composites[num] - 2 * i * i
if is_prime(lowercase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowercase__ ) == n:
return list_nums
return []
def _a () -> int:
"""simple docstring"""
return compute_nums(1 )[0]
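# Context note (Goldbach's other conjecture): every odd composite was conjectured to be
# a prime plus twice a square, e.g. 9 = 7 + 2 * 1**2 and 15 = 7 + 2 * 2**2; the solver
# above searches for the smallest counterexample (5777, per the well-known result).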
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 1 |
'''simple docstring'''
import math
import unittest
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] ) -> Any:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors; primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One has only 1 positive factor; primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 56 |
'''simple docstring'''
from __future__ import annotations
def _a (lowercase__ : int , lowercase__ : int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
__snake_case = number_of_bytes // partitions
__snake_case = []
for i in range(lowercase__ ):
__snake_case = i * bytes_per_partition + 1
__snake_case = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
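# Worked example (illustrative): with number_of_bytes=100 and partitions=4 the splitter
# above yields ['1-25', '26-50', '51-75', '76-100']; with partitions=3 the last range
# absorbs the remainder: ['1-33', '34-66', '67-100'].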
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _a (lowercase__ : Tuple , lowercase__ : bool = True , lowercase__ : float = math.inf , lowercase__ : float = -math.inf , lowercase__ : float = math.inf , lowercase__ : float = -math.inf , lowercase__ : bool = False , lowercase__ : float = 1_0_0 , lowercase__ : float = 0.01 , lowercase__ : float = 1 , ) -> Any:
"""simple docstring"""
__snake_case = False
__snake_case = search_prob
__snake_case = start_temperate
__snake_case = []
__snake_case = 0
__snake_case = None
while not search_end:
__snake_case = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case = current_state
scores.append(lowercase__ )
iterations += 1
__snake_case = None
__snake_case = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor we can move to, or run out of neighbors
__snake_case = random.randint(0 , len(lowercase__ ) - 1 ) # picking a random neighbor
__snake_case = neighbors.pop(lowercase__ )
__snake_case = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case = picked_neighbor
else:
__snake_case = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__snake_case = picked_neighbor
__snake_case = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case = True
else:
__snake_case = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase__ ) , lowercase__ )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
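# Sketch of the acceptance rule used above (the Metropolis criterion): a worsening move
# with score change `change` < 0 is still taken with probability e ** (change / T),
# e.g. change = -2 at T = 100 gives e ** -0.02 ~ 0.98 while at T = 1 it gives ~ 0.135,
# so the search explores freely early on and turns greedy as T decays by
# rate_of_decrease each iteration.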
if __name__ == "__main__":
def _a (lowercase__ : Tuple , lowercase__ : List[str] ) -> Any:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_a : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_a : Tuple = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_a : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_a : str = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
def _a (lowercase__ : List[str] , lowercase__ : Dict ) -> str:
"""simple docstring"""
return (3 * x**2) - (6 * y)
_a : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_a : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'''{local_min.score()}'''
)
_a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_a : List[str] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'''{local_min.score()}'''
)
| 56 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _lowercase ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0_1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1000 ) -> Tuple:
__snake_case = p_stop
__snake_case = max_length
def __iter__( self : Any ) -> Union[str, Any]:
__snake_case = 0
__snake_case = False
while not stop and count < self.max_length:
yield count
count += 1
__snake_case = random.random() < self.p_stop
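# Note (added for clarity): each yielded item ends the stream with probability p_stop,
# so the expected length is roughly 1 / p_stop (100 for the default 0.01), capped at
# max_length; the tests below seed `random` to keep this reproducible.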
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]:
__snake_case = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
__snake_case = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
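# Note on the helper above: shard i yields batches i, i + num_shards, i + 2 * num_shards, ...
# and, when even_batches is True (the default here), pads by cycling back to the start
# of the dataset so every process sees the same number of full batches (see the
# expected values in the tests below).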
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but still
# yields a multiple of num_processes batches.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and does
# not yield a multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but still
# yields a multiple of num_processes batches.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and does
# not yield a multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
random.seed(SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
__snake_case = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
__snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
__snake_case = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
__snake_case = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
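# Note (added for clarity): the loop above interleaves the shards batch by batch; when
# drop_last is False the reference list is repeated because shards pad their final
# batch by cycling back to the beginning of the dataset.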
def a ( self : Dict ) -> Tuple:
__snake_case = 42
__snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
__snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 56 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["flax"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["flax"]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["flax"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Dict = ["flax"]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Any = ["flax"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ) -> Dict:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["flax"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = ["flax"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : int = ["flax"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["flax"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Dict ) -> str:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["flax"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> int:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["flax"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> int:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Any ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['flax'] )
class _lowercase ( metaclass=__lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = ["flax"]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Any:
requires_backends(self , ['flax'] )
@classmethod
def a ( cls : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax'] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
requires_backends(cls , ['flax'] )
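# Illustrative sketch (an editorial addition, not part of the original file):
# the dummy classes above only exist so that imports succeed when flax is not
# installed; constructing one, or calling either of its classmethods, raises
# through `requires_backends` with a message asking the user to install flax:
#
#     try:
#         _lowercase()  # any of the dummy classes defined above
#     except ImportError as err:
#         print(err)  # explains that the flax backend is missing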
| 56 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES['source_spm']).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression of +, -, *, / tokens."""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated towards zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
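# Example (illustrative): the postfix expression "2 3 + 4 *" means (2 + 3) * 4,
# so evaluate_postfix(['2', '3', '+', '4', '*']) returns 20.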
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Reorder a 32-char bit string from big- to little-endian byte order."""
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
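# e.g. to_little_endian(b'0' * 24 + b'1' * 8) == b'1' * 8 + b'0' * 24
# (the four 8-char byte groups are emitted in reversed order).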
def reformat_hex(i: int) -> bytes:
    """Format a 32-bit integer as 8 little-endian hexadecimal characters."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert a message into a padded bit string whose length is a multiple of 512."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b'1'
    while len(bit_string) % 512 != 448:
        bit_string += b'0'
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
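# The padded output always satisfies len(preprocess(message)) % 512 == 0;
# e.g. preprocess(b'') yields one 512-char block (a '1' bit, 447 '0's, length).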
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
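# e.g. not_aa(0) == 2**32 - 1 and not_aa(2**32 - 1) == 0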
def sum_aa(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32
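# e.g. sum_aa(2**32 - 1, 1) == 0 (addition wraps around at 32 bits)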
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
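# e.g. left_rotate_aa(1, 31) == 2**31 and left_rotate_aa(2**31, 1) == 1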
def md5_me(message: bytes) -> bytes:
    """Return the 32-character little-endian hex MD5 digest of `message`."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    # Per-round left-rotation amounts, 16 per round group
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
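# Usage sketch (an editorial addition, assuming the reconstructed names above):
# hashing the empty message reproduces the well-known MD5 digest.
#
#     assert md5_me(b'') == b'd41d8cd98f00b204e9800998ecf8427e'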
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Tuple=99 , SCREAMING_SNAKE_CASE_ : Dict=32 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=37 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=50 , SCREAMING_SNAKE_CASE_ : str=0.0_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> Dict:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = use_labels
__snake_case = scope
def a ( self : str ) -> Tuple:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = self.get_config()
return config, input_ids, input_mask, token_labels
def a ( self : List[Any] ) -> Optional[int]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def a ( self : int ) -> Dict:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int , ) -> Optional[Any]:
__snake_case = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
__snake_case = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict , ) -> List[Any]:
__snake_case = True
__snake_case = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
__snake_case = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> Union[str, Any]:
__snake_case = True
__snake_case = True
__snake_case = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
# first forward pass
__snake_case = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['hidden_states'][0]
__snake_case = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Any:
__snake_case = BertGenerationDecoder(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : str ) -> Union[str, Any]:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Dict = (BertGenerationDecoder,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a ( self : int ) -> Union[str, Any]:
__snake_case = BertGenerationEncoderTester(self )
__snake_case = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def a ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def a ( self : List[Any] ) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Optional[Any]:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def a ( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Union[str, Any]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def a ( self : Optional[int] ) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE_ )
@slow
def a ( self : Optional[Any] ) -> Any:
__snake_case = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def a ( self : Optional[int] ) -> Optional[Any]:
__snake_case = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__snake_case = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ )[0]
__snake_case = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def a ( self : List[Any] ) -> Any:
__snake_case = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__snake_case = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ )[0]
__snake_case = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 56 |
| 56 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 56 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear module with a small trainable low-rank adapter."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
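    # Illustrative usage sketch (the layer sizes below are assumptions): the
    # wrapper keeps the original projection frozen and adds a trainable
    # low-rank bottleneck on top of it.
    #
    #     base = nn.Linear(768, 768)
    #     wrapped = LoRALayer(base, rank=16)  # forward(x) = base(x) + adapter(x)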
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters, otherwise quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
        batch = self.tokenizer('Test batch ', return_tensors='pt').to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 1 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximise the profit within the weight limit."""
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    # Profit gained per 1 kg of each item: calculate and store the
    # profit/weight ratio of every element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Sorted copy of the profit/weight ratios, in ascending order.
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # pick the item with the largest remaining profit/weight ratio
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark the item as used
        # check whether the whole item fits into the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Taking the whole item adds its full profit
            # (fraction taken = weight[index] / weight[index] = 1).
            gain += 1 * profit[index]
        else:
            # The item does not fit entirely, so take only the remaining
            # capacity and add the proportional share of its profit:
            # fraction taken = (max_weight - limit) / weight[index].
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
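# Worked example (illustrative): with profit=[10, 9, 8], weight=[5, 6, 4] and
# max_weight=10, items 0 and 2 are taken whole (ratio 2.0 each, combined
# weight 9), then 1/6 of item 1: calc_profit(...) == 10 + 8 + (1 / 6) * 9 == 19.5.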
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
_a : str = [int(x) for x in input("Input profits separated by spaces: ").split()]
_a : str = [int(x) for x in input("Input weights separated by spaces: ").split()]
_a : Any = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 56 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 16000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(feature_extractor_map) + '\n')
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # reload the processor, overriding the default decoder parameters
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
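    # _get_dummy_logits above returns deterministic pseudo-random logits with a
    # default shape of (batch=2, frames=10, vocab=16); the fixed numpy seed
    # keeps the decoding tests below reproducible.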
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
        __snake_case = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        __snake_case = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
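# Illustrative note (not part of the original test): CTC word offsets are frame
# indices, and the conversion used above is
#   seconds_per_frame = model.config.inputs_to_logits_ratio / sampling_rate.
# For a hypothetical ratio of 320 at 16 kHz that is 320 / 16000 = 0.02 s per
# frame, so a word spanning offsets 10..25 would start at 0.2 s and end at 0.5 s.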
| 56 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : List[str] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
_a : Dict = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
_a : Dict = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = set()
__snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case = char
__snake_case = set(lowercase__ )
return pairs
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Dict="</s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Any="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[str]:
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case = vocab_file
__snake_case = merges_file
__snake_case = {}
__snake_case = 0
__snake_case = 1
__snake_case = 2
__snake_case = 3
self.add_from_file(SCREAMING_SNAKE_CASE_ )
__snake_case = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as merges_handle:
__snake_case = merges_handle.read().split('\n' )[:-1]
__snake_case = [tuple(merge.split()[:-1] ) for merge in merges]
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {}
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a ( self : Dict ) -> Any:
return len(self.encoder )
def a ( self : Dict ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
if token in self.cache:
return self.cache[token]
__snake_case = tuple(SCREAMING_SNAKE_CASE_ )
__snake_case = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__snake_case = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
__snake_case = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case = bigram
__snake_case = []
__snake_case = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
__snake_case = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case = tuple(SCREAMING_SNAKE_CASE_ )
__snake_case = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
__snake_case = get_pairs(SCREAMING_SNAKE_CASE_ )
__snake_case = '@@ '.join(SCREAMING_SNAKE_CASE_ )
__snake_case = word[:-4]
__snake_case = word
return word
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
__snake_case = []
__snake_case = re.findall(r'\S+\n?' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(' ' ) ) )
return split_tokens
def a ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
__snake_case = ' '.join(SCREAMING_SNAKE_CASE_ ).replace('@@ ' , '' ).strip()
return out_string
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.merges_file , SCREAMING_SNAKE_CASE_ )
return out_vocab_file, out_merge_file
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
with open(SCREAMING_SNAKE_CASE_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
__snake_case = f.readlines()
for lineTmp in lines:
__snake_case = lineTmp.strip()
__snake_case = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
__snake_case = line[:idx]
__snake_case = len(self.encoder )
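# Minimal sketch of the merge loop in bpe() above, with toy ranks (assumed
# values, unrelated to PhoBERT's real bpe.codes): starting from the characters
# of a word, the lowest-ranked adjacent pair is merged repeatedly.
#   bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
#   ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",)
# The pieces are then joined with "@@ " and the trailing "</w>" is stripped.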
| 56 |
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    """Recursively compute base raised to a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
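# Quick illustrative checks for the recursion above:
# power(2, 5) -> 2 * power(2, 4) -> ... -> 32, and power(3, 0) -> 1;
# negative exponents are handled by the 1 / result branch in __main__.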
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for simultaneous multi-keyword search."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
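# Usage sketch (illustrative keywords): build the automaton once, then scan text;
# search_in returns {keyword: [start indices]}.
# automaton = Automaton(["he", "she", "his", "hers"])
# automaton.search_in("ushers")  # -> {'she': [1], 'he': [2], 'hers': [2]}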
| 56 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example function: x**3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
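# One illustrative secant step for f(x) = x**3 - 2x - 5 with x0 = 3, x1 = 3.5:
# f(3) = 16, f(3.5) = 30.875, so
# x2 = 3.5 - f(3.5) / ((f(3.5) - f(3)) / 0.5) ~= 3.5 - 30.875 / 29.75 ~= 2.462,
# and iteration continues until |x_{n+1} - x_n| < 1e-5 (the root is near 2.0946).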
| 56 | 1 |
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    """Return True if n is a bouncy number (neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
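# Worked example for check_bouncy above: 538 is bouncy because its digits fall
# (5 -> 3) and then rise (3 -> 8), so neither sorted("538") == "538" nor the
# reversed sort matches; 1234 (increasing) and 9530 (decreasing) are not bouncy.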
| 56 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_a : Tuple = logging.get_logger(__name__)
class _lowercase :
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
__snake_case = question_encoder
__snake_case = generator
__snake_case = self.question_encoder
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , 'question_encoder_tokenizer' )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , 'generator_tokenizer' )
self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
self.generator.save_pretrained(SCREAMING_SNAKE_CASE_ )
@classmethod
def a ( cls : Any , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__snake_case = kwargs.pop('config' , SCREAMING_SNAKE_CASE_ )
if config is None:
__snake_case = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE_ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
__snake_case = AutoTokenizer.from_pretrained(
SCREAMING_SNAKE_CASE_ , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ )
def __call__( self : str , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Any ) -> str:
return self.current_tokenizer(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a ( self : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
return self.generator.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a ( self : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
return self.generator.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] ) -> Tuple:
__snake_case = self.question_encoder
def a ( self : Optional[int] ) -> List[str]:
__snake_case = self.generator
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str = "longest" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> BatchEncoding:
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , SCREAMING_SNAKE_CASE_ , )
if max_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
text_target=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case = labels['input_ids']
return model_inputs
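# Usage sketch (hypothetical ids variable; upstream this wrapper is RagTokenizer):
# __call__ tokenizes with the question-encoder tokenizer, while decode and
# batch_decode use the generator tokenizer.
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# inputs = tokenizer("who wrote hamlet?", return_tensors="pt")
# answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)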
| 56 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (RPN) expression with +, -, * and integer division."""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # emulate division that truncates toward zero for negative quotients
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
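# Illustrative trace for the evaluator above on ["2", "1", "+", "3", "*"]:
# push 2, push 1; "+" pops b=1, a=2 and pushes 3; push 3; "*" pops b=3, a=3
# and pushes 9, so evaluate_postfix(["2", "1", "+", "3", "*"]) == 9.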
| 56 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = OpenAIGPTTokenizer
_SCREAMING_SNAKE_CASE : int = OpenAIGPTTokenizerFast
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Tuple = False
def a ( self : List[str] ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
return "lower newer", "lower newer"
def a ( self : List[str] ) -> Tuple:
__snake_case = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__snake_case = 'lower'
__snake_case = ['low', 'er</w>']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokens + ['<unk>']
__snake_case = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=15 ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
__snake_case = 'This is a simple input'
__snake_case = ['This is a simple input 1', 'This is a simple input 2']
__snake_case = ('This is a simple input', 'This is a pair')
__snake_case = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def a ( self : Union[str, Any] ) -> Any:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowercase ( __lowercase ):
pass
| 56 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Largest all-ones square via plain top-down recursion (no memoization)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Top-down recursion with memoization in dp_array."""

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Iterative bottom-up DP over an (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so the next pass reads true previous-row values
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
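# Worked example for the bottom-up DP above on the 2x2 all-ones matrix:
# dp is filled bottom-to-top, right-to-left; dp[1][1] = dp[1][0] = dp[0][1] = 1
# and dp[0][0] = 1 + min(1, 1, 1) = 2, so the printed result is 2 (the side
# length of the largest all-ones square).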
| 56 | 1 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
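# Usage sketch (assumed shapes): images are NCHW tensors in [-1, 1] and must be
# at least 256 px on the last axis for the DWT-DCT watermark to be applied.
# watermarker = StableDiffusionXLWatermarker()
# out = watermarker.apply_watermark(torch.rand(1, 3, 512, 512) * 2 - 1)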
| 56 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
    with lz4.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
    with py7zr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
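# Usage sketch (hypothetical test): pytest injects session fixtures by name, so
# assuming the fixtures keep their upstream names (e.g. csv_path), a test reads:
# def test_csv_has_header(csv_path):
#     with open(csv_path, newline="") as f:
#         assert f.readline().strip() == "col_1,col_2,col_3"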
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Any = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
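# Note on the lazy-import pattern above: _LazyModule replaces this module in
# sys.modules, and attributes listed in _import_structure are only imported on
# first access, so e.g. `from transformers import ResNetModel` defers loading
# the torch-backed implementation until the class is actually touched.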
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "camembert"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0522 , SCREAMING_SNAKE_CASE_ : str=768 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=512 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=0.0_2 , SCREAMING_SNAKE_CASE_ : Tuple=1e-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Dict="absolute" , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class _lowercase ( __lowercase ):
@property
def a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
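# Illustrative: for any task other than "multiple-choice", the mapping above is
# {"input_ids": {0: "batch", 1: "sequence"},
#  "attention_mask": {0: "batch", 1: "sequence"}},
# i.e. batch size and sequence length remain dynamic axes in the ONNX export.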
| 56 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Dict = logging.get_logger(__name__)
_a : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _lowercase ( __lowercase , __lowercase ):
_SCREAMING_SNAKE_CASE : Any = "bit"
_SCREAMING_SNAKE_CASE : List[str] = ["preactivation", "bottleneck"]
_SCREAMING_SNAKE_CASE : List[str] = ["SAME", "VALID"]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : int=64 , SCREAMING_SNAKE_CASE_ : str=[256, 512, 1024, 2048] , SCREAMING_SNAKE_CASE_ : Any=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE_ : Union[str, Any]="preactivation" , SCREAMING_SNAKE_CASE_ : str="relu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Any=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : List[Any]=1 , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Dict=None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__snake_case = global_padding.upper()
else:
raise ValueError(f'Padding strategy {global_padding} not supported' )
__snake_case = num_channels
__snake_case = embedding_size
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = layer_type
__snake_case = hidden_act
__snake_case = global_padding
__snake_case = num_groups
__snake_case = drop_path_rate
__snake_case = embedding_dynamic_padding
__snake_case = output_stride
__snake_case = width_factor
__snake_case = ['stem'] + [f'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
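# Usage sketch (upstream name BitConfig assumed for the class above):
# config = BitConfig(layer_type="bottleneck", out_features=["stage4"])
# config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] with the default depths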
| 56 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "timesformer"
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str]=224 , SCREAMING_SNAKE_CASE_ : List[str]=16 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=8 , SCREAMING_SNAKE_CASE_ : Tuple=768 , SCREAMING_SNAKE_CASE_ : int=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE_ : Any=1e-6 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]="divided_space_time" , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = num_frames
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = qkv_bias
__snake_case = attention_type
__snake_case = drop_path_rate
| 56 | 1 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _lowercase ( __lowercase ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str = "▁" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "<unk>" , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "</s>" , SCREAMING_SNAKE_CASE_ : Union[str, AddedToken] = "<pad>" , ) -> str:
__snake_case = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__snake_case = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__snake_case = token_dict['token']
__snake_case = Tokenizer(Unigram() )
__snake_case = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__snake_case = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ),
pre_tokenizers.Digits(individual_digits=SCREAMING_SNAKE_CASE_ ),
pre_tokenizers.Punctuation(),
] )
__snake_case = decoders.Metaspace(replacement=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
__snake_case = TemplateProcessing(
single=f'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__snake_case = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , SCREAMING_SNAKE_CASE_ : int = 8000 , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Tuple:
__snake_case = trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_ , special_tokens=self.special_tokens_list , show_progress=SCREAMING_SNAKE_CASE_ , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = [files]
self._tokenizer.train(SCREAMING_SNAKE_CASE_ , trainer=SCREAMING_SNAKE_CASE_ )
self.add_unk_id()
def a ( self : str , SCREAMING_SNAKE_CASE_ : Union[Iterator[str], Iterator[Iterator[str]]] , SCREAMING_SNAKE_CASE_ : int = 8000 , SCREAMING_SNAKE_CASE_ : bool = True , ) -> str:
__snake_case = trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_ , special_tokens=self.special_tokens_list , show_progress=SCREAMING_SNAKE_CASE_ , )
self._tokenizer.train_from_iterator(SCREAMING_SNAKE_CASE_ , trainer=SCREAMING_SNAKE_CASE_ )
self.add_unk_id()
def a ( self : Dict ) -> str:
__snake_case = json.loads(self._tokenizer.to_str() )
__snake_case = self.special_tokens['unk']['id']
__snake_case = Tokenizer.from_str(json.dumps(SCREAMING_SNAKE_CASE_ ) )
| 56 |
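A standalone sketch of the normalization and pre-tokenization chain configured in the constructor above, applied with the same components to a toy input (the printed pre-tokenization is approximate; offsets are omitted):

from tokenizers import Regex, normalizers, pre_tokenizers

norm = normalizers.Sequence(
    [
        normalizers.Nmt(),
        normalizers.NFKC(),
        normalizers.Replace(Regex(" {2,}"), " "),
        normalizers.Lowercase(),
    ]
)
pre = pre_tokenizers.Sequence(
    [
        pre_tokenizers.Metaspace(),
        pre_tokenizers.Digits(individual_digits=True),
        pre_tokenizers.Punctuation(),
    ]
)
text = norm.normalize_str("Hello   WORLD 42!")
print(text)                        # 'hello world 42!'
print(pre.pre_tokenize_str(text))  # roughly ['▁hello', '▁world', '▁', '4', '2', '!']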
'''simple docstring'''
from typing import Any
class _lowercase :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
__snake_case = data
__snake_case = None
class _lowercase :
def __init__( self : List[Any] ) -> Tuple:
__snake_case = None
def a ( self : int ) -> Union[str, Any]:
__snake_case = self.head
while temp is not None:
print(temp.data , end=' ' )
__snake_case = temp.next
print()
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
__snake_case = Node(SCREAMING_SNAKE_CASE_ )
__snake_case = self.head
__snake_case = new_node
def a ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
if node_data_a == node_data_a:
return
else:
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
if node_a is None or node_a is None:
return
__snake_case , __snake_case = node_a.data, node_a.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int] = False
_a : Dict = False
def _a (lowercase__ : Namespace ) -> int:
"""simple docstring"""
return TrainCommand(lowercase__ )
class _lowercase ( __lowercase ):
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Any:
__snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE_ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE_ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE_ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE_ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE_ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE_ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE_ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Namespace ) -> Optional[int]:
__snake_case = logging.get_logger('transformers-cli/training' )
__snake_case = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = args.output
__snake_case = args.column_label
__snake_case = args.column_text
__snake_case = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
__snake_case = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
__snake_case = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
__snake_case = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = args.validation_split
__snake_case = args.train_batch_size
__snake_case = args.valid_batch_size
__snake_case = args.learning_rate
__snake_case = args.adam_epsilon
def a ( self : Optional[int] ) -> int:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a ( self : Tuple ) -> List[str]:
raise NotImplementedError
def a ( self : Dict ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 56 |
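A hedged sketch of wiring the command above into an `ArgumentParser`. The method names `register_subcommand` and `run` are the pre-rewrite ones (the dump renames them to `a`), so treat this as illustrative only:

# Illustrative only: the method names below are assumptions about the pre-rewrite API.
from argparse import ArgumentParser

parser = ArgumentParser("transformers CLI")
subparsers = parser.add_subparsers()
TrainCommand.register_subcommand(subparsers)  # assumed pre-rewrite name
args = parser.parse_args(
    ["train", "--train_data", "data.csv", "--model", "bert-base-uncased", "--output", "./out"]
)
command = args.func(args)  # the factory registered via set_defaults above
command.run()              # assumed pre-rewrite name; dispatches to run_tf/run_torch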
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_a : Any = logging.get_logger(__name__)
class _lowercase ( __lowercase ):
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 56 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# Run the backward pass. For simplicity we don't compute a real loss here;
# we just backprop on the mean difference between the output and random labels.
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# Run the backward pass. For simplicity we don't compute a real loss here;
# we just backprop on the mean difference between the output and random labels.
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.float16 if fpaa else torch.float32
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.float16 if fpaa else torch.float32
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
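A standalone sketch of the seeding pattern these tests rely on: a `torch.Generator` created per device and re-seeded identically yields identical samples, which is what makes the expected output slices reproducible across runs:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
gen_a = torch.Generator(device=device).manual_seed(0)
gen_b = torch.Generator(device=device).manual_seed(0)
a = torch.randn(2, 4, 8, 8, generator=gen_a, device=device)
b = torch.randn(2, 4, 8, 8, generator=gen_b, device=device)
assert torch.equal(a, b)  # same seed on the same device -> identical samples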
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_a : int = logging.get_logger(__name__)
class _lowercase ( __lowercase ):
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 56 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a : Optional[Any] = 100
_a : Dict = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def _a (lowercase__ : int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__snake_case = set()
__snake_case = 42
__snake_case = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a (lowercase__ : int = 5_0_0_0 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , lowercase__ ):
if len(partition(lowercase__ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 1 |
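A standalone re-derivation of the memoized recursion above on a toy prime set (not an import of the rewritten module), with a worked value: the products reachable for 7 are 7, 5*2 = 10, and 3*2*2 = 12.

from functools import lru_cache

PRIMES = {2, 3, 5, 7}  # toy prime set for the demo

@lru_cache(maxsize=None)
def partition(n: int) -> frozenset:
    if n < 0:
        return frozenset()
    if n == 0:
        return frozenset({1})
    out = set()
    for p in PRIMES:
        if p <= n:
            out.update(sub * p for sub in partition(n - p))
    return frozenset(out)

print(sorted(partition(7)))  # [7, 10, 12]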
'''simple docstring'''
def _a (lowercase__ : str ) -> bool:
"""simple docstring"""
__snake_case = 0
for ch in input_str:
__snake_case = ord(lowercase__ )
__snake_case = pow(2 , lowercase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
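A cleaned-up sketch of the same bitmap idea (the dump renames the function to `_a`): each character sets one bit, and a repeated character hits a bit that is already on.

def all_chars_unique(s: str) -> bool:
    bitmap = 0
    for ch in s:
        bit = 1 << ord(ch)
        if bitmap & bit:  # bit already set -> duplicate character
            return False
        bitmap |= bit
    return True

print(all_chars_unique("abcde"))  # True
print(all_chars_unique("abcda"))  # False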
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
| 56 | 1 |
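A short usage sketch for the entry points above; `config_command` launches the interactive prompts, so this is only meaningful in a terminal session:

# Shell equivalent (sketch):  accelerate config --config_file ./my_config.yaml
parser = config_command_parser()
args = parser.parse_args(["--config_file", "./my_config.yaml"])
config_command(args)  # walks through the prompts, then writes the YAML/JSON file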
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_a : List[Any] = None
_a : Dict = logging.get_logger(__name__)
_a : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_a : Optional[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_a : Union[str, Any] = {
"camembert-base": 512,
}
_a : List[str] = "▁"
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : Optional[int] = CamembertTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE_ : List[Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : int , ) -> Dict:
# The mask token behaves like a normal word, i.e. it includes the space before it
__snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case = vocab_file
__snake_case = False if not self.vocab_file else True
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 56 |
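A pure-data sketch of the sequence-pair layout the two methods above implement (`<s> A </s></s> B </s>` with all-zero token type ids); the ids are illustrative, not the real CamemBERT vocabulary:

cls_id, sep_id = 5, 6  # illustrative ids, not the real vocab
a_ids, b_ids = [10, 11], [20]
pair = [cls_id] + a_ids + [sep_id, sep_id] + b_ids + [sep_id]
print(pair)             # [5, 10, 11, 6, 6, 20, 6]
print([0] * len(pair))  # token_type_ids are all zeros, as the second method returns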
'''simple docstring'''
from __future__ import annotations
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_a : Dict = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def _a (lowercase__ : int ) -> list[int]:
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
__snake_case = []
for num in range(len(lowercase__ ) ):
__snake_case = 0
while 2 * i * i <= odd_composites[num]:
__snake_case = odd_composites[num] - 2 * i * i
if is_prime(lowercase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowercase__ ) == n:
return list_nums
return []
def _a () -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 1 |
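A worked check of the decomposition the inner loop above tests: an odd composite n is ruled out as a counterexample if n = prime + 2*i**2 for some i >= 1. For n = 33: 33 - 2*1**2 = 31, which is prime.

def _is_prime(n: int) -> bool:  # tiny trial-division helper just for this demo
    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))

n, i, found = 33, 1, False
while 2 * i * i <= n:
    if _is_prime(n - 2 * i * i):
        found = True
        break
    i += 1
print(found)  # True -> 33 is not a counterexample to Goldbach's other conjecture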
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "sentencepiece.bpe.model"}
_a : List[Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_a : Union[str, Any] = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
_a : Dict = "▁"
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[str] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : Any="<pad>" , SCREAMING_SNAKE_CASE_ : str="<mask>" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it
__snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
__snake_case = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__snake_case = len(self.sp_model ) - 1
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a ( self : List[str] ) -> Dict:
return len(self.sp_model )
def a ( self : Optional[int] ) -> List[str]:
__snake_case = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
return spm_id if spm_id else self.unk_token_id
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
__snake_case = []
__snake_case = ''
__snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
__snake_case = True
__snake_case = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
__snake_case = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __getstate__( self : List[str] ) -> Tuple:
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 56 |
'''simple docstring'''
from __future__ import annotations
def _a (lowercase__ : int , lowercase__ : int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
__snake_case = number_of_bytes // partitions
__snake_case = []
for i in range(lowercase__ ):
__snake_case = i * bytes_per_partition + 1
__snake_case = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
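A readable re-statement of the byte-range splitter above under assumed names (the dump renames the function to `_a`), with a worked example: 100 bytes over 3 partitions, where the last partition absorbs the remainder.

def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    bytes_per_partition = number_of_bytes // partitions
    out = []
    for i in range(partitions):
        start = i * bytes_per_partition + 1
        end = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        out.append(f"{start}-{end}")
    return out

print(allocation_num(100, 3))  # ['1-33', '34-66', '67-100']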
'''simple docstring'''
def _a (lowercase__ : str , lowercase__ : list[str] ) -> str:
"""simple docstring"""
__snake_case = ''
for word_or_phrase in separated:
if not isinstance(lowercase__ , lowercase__ ):
raise Exception('join() accepts only strings to be joined' )
joined += word_or_phrase + separator
return joined.strip(lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56 |
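A readable re-statement of the joiner above under assumed parameter names (the dump renames the function to `_a`): it appends the separator after every element, then strips the trailing one.

def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)

print(join(", ", ["a", "b", "c"]))  # 'a, b, c'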
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _lowercase ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0_1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1000 ) -> Tuple:
__snake_case = p_stop
__snake_case = max_length
def __iter__( self : Any ) -> Union[str, Any]:
__snake_case = 0
__snake_case = False
while not stop and count < self.max_length:
yield count
count += 1
__snake_case = random.random() < self.p_stop
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]:
__snake_case = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
__snake_case = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a round
# multiple of num_processes batches.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a
# round multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number
        # of batches is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number
        # of batches is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
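    # Hedged reading of the assertions above: with even_batches=False and uneven
    # batch sizes, BatchSamplerShard hands out whole batches round-robin, so shard 0
    # receives batches 0, 2, 4 and shard 1 receives batches 1, 3.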
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
random.seed(SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
__snake_case = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
__snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
__snake_case = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
__snake_case = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
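    # Hedged reading of the check above: the per-shard outputs are re-interleaved in
    # chunks of shard_batch_size and compared to the reference list; when drop_last is
    # False the reference is tiled, since shards wrap around to keep batches even.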
def a ( self : Dict ) -> Tuple:
__snake_case = 42
__snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
__snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 56 | 1 |
'''simple docstring'''
from math import sqrt
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
__snake_case = True
    # 0 and 1 are not primes.
if number <= 1:
__snake_case = False
for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__snake_case = False
break
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool"
return status
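# Illustrative checks (hedged: assumes the placeholder assignments above resolve as
# in a standard trial-division test over divisors from 2 to sqrt(number)):
#   is_prime(2) -> True, is_prime(97) -> True, is_prime(1) -> False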
def _a (lowercase__ : Optional[int] ) -> int:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__snake_case = list(range(2 , n + 1 ) )
    __snake_case = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__snake_case = 0
# filters actual prime numbers.
__snake_case = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
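# Illustrative result (hedged sketch of the sieve above): composites in begin_list
# are zeroed out and filtered, so for n = 10 the returned list would be [2, 3, 5, 7].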
def _a (lowercase__ : Dict ) -> Optional[int]:
"""simple docstring"""
    assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must be an int and > 2"
__snake_case = []
    # iterates over all numbers from 2 up to N (inclusive)
    # if a number is prime then it is appended to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowercase__ ):
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _a (lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
    assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must be an int and >= 0"
    __snake_case = [] # this list will be returned by the function.
# potential prime number factors.
__snake_case = 2
__snake_case = number
if number == 0 or number == 1:
ans.append(lowercase__ )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(lowercase__ ):
while quotient != 1:
if is_prime(lowercase__ ) and (quotient % factor == 0):
ans.append(lowercase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
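# Illustrative results (hedged: assumes 'ans' collects the current trial factor as
# 'quotient' is divided down):
#   prime_factorization(12) -> [2, 2, 3]
#   prime_factorization(97) -> [97]  (prime inputs are appended unchanged)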
def _a (lowercase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__snake_case = 0
# prime factorization of 'number'
__snake_case = prime_factorization(lowercase__ )
__snake_case = max(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__snake_case = 0
# prime factorization of 'number'
__snake_case = prime_factorization(lowercase__ )
__snake_case = min(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
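# Illustrative results (hedged, following the max/min over the factor list above):
# for 140 = 2 * 2 * 5 * 7 the greatest prime factor is 7 and the smallest is 2.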
def _a (lowercase__ : Tuple ) -> Any:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 == 0
def _a (lowercase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 != 0
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ )
), "'number' must been an int, even and > 2"
__snake_case = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__snake_case = get_prime_numbers(lowercase__ )
__snake_case = len(lowercase__ )
# run variable for while-loops.
__snake_case = 0
__snake_case = None
    # exit variable, for breaking out of the loops
__snake_case = True
while i < len_pn and loop:
__snake_case = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__snake_case = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (len(lowercase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
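# Illustrative result (hedged: the nested loops above stop at the first prime pair in
# ascending order): for the even input 28 the returned pair would be [5, 23].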
def _a (lowercase__ : Optional[int] , lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__snake_case = 0
while numbera != 0:
__snake_case = numbera % numbera
__snake_case = numbera
__snake_case = rest
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
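# Illustrative results (hedged: the loop above is the classic Euclidean algorithm,
# although the obfuscated placeholders hide which operand is which):
#   gcd(24, 36) -> 12, gcd(0, 5) -> 5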
def _a (lowercase__ : List[str] , lowercase__ : Optional[int] ) -> int:
"""simple docstring"""
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__snake_case = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__snake_case = prime_factorization(lowercase__ )
__snake_case = prime_factorization(lowercase__ )
elif numbera == 1 or numbera == 1:
__snake_case = []
__snake_case = []
__snake_case = max(lowercase__ , lowercase__ )
__snake_case = 0
__snake_case = 0
    __snake_case = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__snake_case = prime_fac_a.count(lowercase__ )
__snake_case = prime_fac_a.count(lowercase__ )
for _ in range(max(lowercase__ , lowercase__ ) ):
ans *= n
else:
__snake_case = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__snake_case = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
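# Illustrative result (hedged: the kgV routine above merges the two prime
# factorizations, taking the higher multiplicity of each shared prime):
#   kgV(8, 10) -> 40, since 8 = 2^3 and 10 = 2 * 5.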
def _a (lowercase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int"
__snake_case = 0
__snake_case = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime,
        # advance to the next prime number.
while not is_prime(lowercase__ ):
ans += 1
# precondition
assert isinstance(lowercase__ , lowercase__ ) and is_prime(
lowercase__ ), "'ans' must been a prime number and from type int"
return ans
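# Illustrative results (hedged: the counter above steps from prime to prime, so the
# function returns the n-th prime with zero-based indexing):
#   n = 0 -> 2, n = 3 -> 7, n = 8 -> 23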
def _a (lowercase__ : List[str] , lowercase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
assert (
is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__snake_case = p_number_a + 1 # jump to the next number
    __snake_case = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
while number < p_number_a:
ans.append(lowercase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and ans[0] != p_number_a
and ans[len(lowercase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _a (lowercase__ : int ) -> Dict:
"""simple docstring"""
    assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must be an int and >= 1"
__snake_case = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase__ )
# precondition
    assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def _a (lowercase__ : List[Any] ) -> str:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number > 1
), "'number' must been an int and >= 1"
__snake_case = get_divisors(lowercase__ )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (divisors[0] == 1)
and (divisors[len(lowercase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
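# Illustrative results (hedged): 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are
# perfect numbers, so the check above would return True for both.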
def _a (lowercase__ : List[Any] , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__snake_case = gcd(abs(lowercase__ ) , abs(lowercase__ ) )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
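# Illustrative result (hedged: both terms are divided by their gcd):
# simplifying 10/20 yields the tuple (1, 2).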
def _a (lowercase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0"
__snake_case = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0"
__snake_case = 0
__snake_case = 1
    __snake_case = 1 # this will be returned
for _ in range(n - 1 ):
__snake_case = ans
ans += fiba
__snake_case = tmp
return ans
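# Illustrative results (hedged: assuming the three placeholders above initialise to
# 0, 1 and 1, the loop yields the sequence 1, 1, 2, 3, 5, ..., so n = 5 -> 5).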
| 56 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
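        # Hedged note on the test above: MarianTokenizer keeps separate source and
        # target SentencePiece vocabularies -- plain calls use the source tokenizer,
        # while text_target= routes through the target one, hence the distinct ids.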
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
def _a (lowercase__ : str ) -> list[int]:
"""simple docstring"""
return [ord(lowercase__ ) - 9_6 for elem in plain]
def _a (lowercase__ : list[int] ) -> str:
"""simple docstring"""
return "".join(chr(elem + 9_6 ) for elem in encoded )
def _a () -> None:
"""simple docstring"""
__snake_case = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , lowercase__ )
print('Decoded:' , decode(lowercase__ ) )
if __name__ == "__main__":
main()
| 56 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
if len(lowercase__ ) != 3_2:
raise ValueError('Input must be of length 32' )
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _a (lowercase__ : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '08x' )[-8:]
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = B''
for char in message:
bit_string += format(lowercase__ , '08b' ).encode('utf-8' )
__snake_case = format(len(lowercase__ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(lowercase__ ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
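# Illustrative padding check (hedged): the loop above pads the bit string until its
# length is congruent to 448 mod 512, then appends the 64-bit message length, so
# preprocess(b"") yields exactly 512 bits (one '1', 447 '0's, 64 length bits).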
def _a (lowercase__ : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(lowercase__ ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(lowercase__ ) , 5_1_2 ):
__snake_case = bit_string[pos : pos + 5_1_2]
__snake_case = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def _a (lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '032b' )
__snake_case = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(lowercase__ , 2 )
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
return (a + b) % 2**3_2
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = preprocess(lowercase__ )
__snake_case = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
__snake_case = 0x6_7_4_5_2_3_0_1
__snake_case = 0xE_F_C_D_A_B_8_9
__snake_case = 0x9_8_B_A_D_C_F_E
__snake_case = 0x1_0_3_2_5_4_7_6
    __snake_case = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowercase__ ):
__snake_case = aa
__snake_case = ba
__snake_case = ca
__snake_case = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case = d ^ (b & (c ^ d))
__snake_case = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case = c ^ (d & (b ^ c))
__snake_case = (5 * i + 1) % 1_6
elif i <= 4_7:
__snake_case = b ^ c ^ d
__snake_case = (3 * i + 5) % 1_6
else:
__snake_case = c ^ (b | not_aa(lowercase__ ))
__snake_case = (7 * i) % 1_6
__snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__snake_case = d
__snake_case = c
__snake_case = b
__snake_case = sum_aa(lowercase__ , left_rotate_aa(lowercase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ )
return digest
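# Illustrative result (hedged: assuming the placeholder state variables wire up to
# the reference MD5 constants above): hashing the empty message should reproduce the
# well-known digest d41d8cd98f00b204e9800998ecf8427e.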
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : Optional[Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = "transfo-xl"
_SCREAMING_SNAKE_CASE : Any = ["mems"]
_SCREAMING_SNAKE_CASE : int = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any]=26_7735 , SCREAMING_SNAKE_CASE_ : Any=[2_0000, 4_0000, 20_0000] , SCREAMING_SNAKE_CASE_ : Any=1024 , SCREAMING_SNAKE_CASE_ : Any=1024 , SCREAMING_SNAKE_CASE_ : Optional[int]=16 , SCREAMING_SNAKE_CASE_ : List[str]=64 , SCREAMING_SNAKE_CASE_ : Tuple=4096 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : List[Any]=18 , SCREAMING_SNAKE_CASE_ : int=1600 , SCREAMING_SNAKE_CASE_ : Optional[int]=1000 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Optional[int]=-1 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : List[str]="normal" , SCREAMING_SNAKE_CASE_ : Any=0.0_1 , SCREAMING_SNAKE_CASE_ : Tuple=0.0_1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE_ : int=1e-5 , SCREAMING_SNAKE_CASE_ : Optional[int]=0 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> Optional[int]:
__snake_case = vocab_size
__snake_case = []
self.cutoffs.extend(SCREAMING_SNAKE_CASE_ )
if proj_share_all_but_first:
__snake_case = [False] + [True] * len(self.cutoffs )
else:
__snake_case = [False] + [False] * len(self.cutoffs )
__snake_case = d_model
__snake_case = d_embed
__snake_case = d_head
__snake_case = d_inner
__snake_case = div_val
__snake_case = pre_lnorm
__snake_case = n_layer
__snake_case = n_head
__snake_case = mem_len
__snake_case = same_length
__snake_case = attn_type
__snake_case = clamp_len
__snake_case = sample_softmax
__snake_case = adaptive
__snake_case = dropout
__snake_case = dropatt
__snake_case = untie_r
__snake_case = init
__snake_case = init_range
__snake_case = proj_init_std
__snake_case = init_std
__snake_case = layer_norm_epsilon
super().__init__(eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def a ( self : Optional[int] ) -> str:
# Message copied from Transformer-XL documentation
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
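# Hedged usage sketch (the archive map above points at "transfo-xl-wt103", so this is
# evidently the Transformer-XL configuration): instantiating the class with defaults,
# e.g. `cfg = _lowercase()`, would give vocab_size=267735 and d_model=1024.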
| 56 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__snake_case = quote(lowercase__ )
return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' , revision=lowercase__ )
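# Hedged usage sketch: for a dataset repo this helper delegates to hfh.hf_hub_url,
# producing a resolve-style URL of roughly the shape
#   https://huggingface.co/datasets/<repo_id>/resolve/<revision>/<path>
# with the path pre-quoted only on huggingface_hub versions older than 0.11.0.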
| 56 | 1 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
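# Hedged usage sketch (illustrative, not part of the original tests): the adapter
# wrapper above adds a trainable low-rank bottleneck on top of a frozen module, e.g.
#   base = nn.Linear(16, 16)
#   wrapped = _lowercase(base, 4)      # the adapter class defined above
#   out = wrapped(torch.randn(2, 16))  # base(x) + adapter(x)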
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with `float()`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with `half()`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = TextToVideoSDPipeline
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_SCREAMING_SNAKE_CASE : List[str] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def a ( self : Union[str, Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__snake_case = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def a ( self : Any ) -> Union[str, Any]:
__snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE_ )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
__snake_case = 'np'
__snake_case = sd_pipe(**SCREAMING_SNAKE_CASE_ ).frames
__snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__snake_case = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : Optional[int] ) -> str:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self : Any ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def a ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def a ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def a ( self : List[Any] ) -> List[str]:
pass
def a ( self : Dict ) -> Optional[int]:
return super().test_progress_bar()
@slow
@skip_mps
class _lowercase ( unittest.TestCase ):
def a ( self : List[Any] ) -> str:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
__snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case = pipe.to('cuda' )
__snake_case = 'Spiderman is surfing'
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , output_type='pt' ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def a ( self : Dict ) -> Any:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
__snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__snake_case = pipe.to('cuda' )
__snake_case = 'Spiderman is surfing'
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='pt' ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 56 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load with overridden decoder parameters and make sure they are applied
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
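        # deterministic (seeded) random logits shaped like CTC output: (batch, time, vocab)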
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the locally cached files are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_a : int = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ["BeitFeatureExtractor"]
_a : int = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
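# Worked example (added for illustration):
#   power(2, 3) -> 2 * power(2, 2) -> 2 * 2 * power(2, 1) -> 2 * 2 * 2 * power(2, 0) = 8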
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
    if exponent < 0: # power() does not handle negative exponents, so invert the result computed with abs(exponent)
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return choice(lowercase__ )
def _a (lowercase__ : list[int] , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = random_pivot(lowercase__ )
# partition based on pivot
# linear time
__snake_case = [e for e in lst if e < pivot]
__snake_case = [e for e in lst if e > pivot]
    # if we get lucky, the pivot is exactly the element we want:
    # the list splits into
    #   small (elements smaller than the pivot)
    #   + pivot
    #   + big (elements larger than the pivot)
    # so when len(small) == k - 1, the pivot is the kth smallest element.
if len(lowercase__ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowercase__ ) < k - 1:
return kth_number(lowercase__ , k - len(lowercase__ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowercase__ , lowercase__ )
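# Illustrative behavior (added): with distinct values the function returns the
# kth smallest element, e.g. kth_number([2, 1, 3, 4, 5], 3) -> 3. Elements equal
# to the pivot are dropped by the partition above, so distinct inputs are assumed.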
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
'''simple docstring'''
import math
from collections.abc import Callable
def _a (lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ) -> float:
"""simple docstring"""
__snake_case = xa
__snake_case = xa
while True:
if x_n == x_na or function(lowercase__ ) == function(lowercase__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__snake_case = x_na - (
function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n))
)
        if abs(x_na - x_n ) < 1_0**-5:  # stop once successive iterates agree
            return x_na
__snake_case = x_na
__snake_case = x_na
def _a (lowercase__ : float ) -> float:
"""simple docstring"""
return math.pow(lowercase__ , 3 ) - (2 * x) - 5
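# f has a single real root near x ~ 2.0946, which intersection(f, 3, 3.5) should approach.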
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 1 |
'''simple docstring'''
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__snake_case = range(3 , int(math.sqrt(lowercase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _a (lowercase__ : str , lowercase__ : str=1 , **lowercase__ : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = factor * value
__snake_case = value
while not is_prime(lowercase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowercase__ )
return value
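# Illustrative behavior (added), assuming the default ascending search:
# starting from 14 the loop visits 15 and 16, then returns the prime 17.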
| 56 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 1 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_a : Any = logging.getLogger(__name__)
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = "masked_bert"
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=3_0522 , SCREAMING_SNAKE_CASE_ : int=768 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3072 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=512 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE_ : str=1e-12 , SCREAMING_SNAKE_CASE_ : str=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : List[str]="constant" , SCREAMING_SNAKE_CASE_ : int=0.0 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ) -> str:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
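        # the fields below appear to configure movement-pruning style masking of linear layers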
__snake_case = pruning_method
__snake_case = mask_init
__snake_case = mask_scale
| 56 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
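                # division result is truncated toward zero (C-style), unlike Python's floor division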
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
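# Worked example (added): ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9.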
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : Any = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 56 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase__ : int , lowercase__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case = update_area_of_max_square(lowercase__ , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
return sub_problem_sol
else:
return 0
__snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
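# The plain recursion above recomputes overlapping subproblems; the variants
# below add memoization (dp_array) and bottom-up tabulation to avoid that.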
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case = update_area_of_max_square_using_dp_array(lowercase__ , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , lowercase__ , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
__snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case = [0]
__snake_case = [[-1] * cols for _ in range(lowercase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase__ )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [[0] * (cols + 1) for _ in range(rows + 1 )]
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = dp_array[row][col + 1]
__snake_case = dp_array[row + 1][col + 1]
__snake_case = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(dp_array[row][col] , lowercase__ )
else:
__snake_case = 0
return largest_square_area
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [0] * (cols + 1)
__snake_case = [0] * (cols + 1)
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = current_row[col + 1]
__snake_case = next_row[col + 1]
__snake_case = next_row[col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(current_row[col] , lowercase__ )
else:
__snake_case = 0
__snake_case = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
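    # note: the functions return the side length of the largest all-ones square, so this prints 2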
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_a : List[Any] = "<<<<<<< This should probably be modified because it mentions: "
_a : Dict = "=======\n>>>>>>>\n"
_a : int = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
_a : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def _a (lowercase__ : Namespace ) -> Dict:
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _lowercase ( __lowercase ):
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> List[Any]:
__snake_case = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = get_logger('datasets-cli/converting' )
__snake_case = tfds_path
__snake_case = datasets_directory
def a ( self : Optional[Any] ) -> Tuple:
if os.path.isdir(self._tfds_path ):
__snake_case = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__snake_case = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__snake_case = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__snake_case = []
__snake_case = []
__snake_case = {}
if os.path.isdir(self._tfds_path ):
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(SCREAMING_SNAKE_CASE_ , encoding='utf-8' ) as f:
__snake_case = f.readlines()
__snake_case = []
__snake_case = False
__snake_case = False
__snake_case = []
for line in lines:
__snake_case = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__snake_case = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__snake_case = ''
continue
elif "from absl import logging" in out_line:
__snake_case = 'from datasets import logging\n'
elif "getLogger" in out_line:
__snake_case = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__snake_case = True
__snake_case = list(filter(lambda SCREAMING_SNAKE_CASE_ : e in out_line , SCREAMING_SNAKE_CASE_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(SCREAMING_SNAKE_CASE_ ) + '\n' )
out_lines.append(SCREAMING_SNAKE_CASE_ )
out_lines.append(SCREAMING_SNAKE_CASE_ )
continue
else:
for pattern, replacement in TO_CONVERT:
__snake_case = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__snake_case = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , SCREAMING_SNAKE_CASE_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__snake_case = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__snake_case = True
out_lines.append(SCREAMING_SNAKE_CASE_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__snake_case = f_name.replace('.py' , '' )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(SCREAMING_SNAKE_CASE_ )
if needs_manual_update:
with_manual_update.append(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__snake_case = os.path.basename(SCREAMING_SNAKE_CASE_ )
__snake_case = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 56 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
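# The toy rows above are reused by the CSV/JSON/Parquet/SQL fixtures below.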
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    """Text file containing a Unicode paragraph separator (fixture name reconstructed from context)."""
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    """Data directory containing hidden files and a hidden subdirectory (fixture name reconstructed from context)."""
    data_dir = tmp_path_factory.mktemp('data_dir')
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 1 |
"""Rail-fence (zigzag) cipher: encrypt, decrypt and brute-force helpers."""


def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters into a zigzag grid of ``key`` rows, then read the rows left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = [''.join(row) for row in temp_grid]
    output_string = ''.join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Rebuild the zigzag grid from the ciphertext and read it back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('*')
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
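    # Round-trip sanity check (input chosen here for illustration, assuming
    # the reconstructed names above): encrypt/decrypt are inverses for any
    # key in (1, len(message)), and bruteforce lists the plaintext among
    # its candidates.
    example = encrypt("Hello, World!", 4)
    assert decrypt(example, 4) == "Hello, World!"
    assert "Hello, World!" in bruteforce(example).values()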
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
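
# Minimal usage sketch (illustrative; assumes a working transformers install):
#
#   config = CamembertConfig(num_hidden_layers=6)
#   CamembertOnnxConfig(config, task="default").inputs
#   # -> OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...)])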
| 56 | 1 |
"""Generate every single-letter capitalization variant of a string."""


def capitalize_each_alpha(txt: str) -> list:
    # Descriptive name chosen here; the original function name was stripped
    # from this snippet.
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 56 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
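
# Usage sketch (hypothetical values, not from the source):
#
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
#   config.num_frames  # -> 16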
| 56 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
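
# Usage sketch (illustrative): constructing the shim emits the FutureWarning
# above and then behaves exactly like GLPNImageProcessor.
#
#   feature_extractor = GLPNFeatureExtractor()
#   isinstance(feature_extractor, GLPNImageProcessor)  # -> True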
| 56 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data fields only; the links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
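    # Swapping with a value that is absent is a safe no-op, thanks to the
    # early returns in swap_nodes (illustrative check, not from the source):
    ll.swap_nodes(2, 99)
    ll.print_list()  # unchanged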
| 56 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ : Tuple = logging.getLogger(__name__)
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''summarization'''
a__ = ['''loss''']
a__ = ROUGE_KEYS
a__ = '''rouge2'''
def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__magic_name__ :Dict = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(__lowerCAmelCase , num_labels=__lowerCAmelCase , mode=self.mode , **__lowerCAmelCase )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
__magic_name__ :Optional[int] = Path(self.output_dir ) / '''metrics.json'''
__magic_name__ :List[str] = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
__magic_name__ :Optional[int] = 0
__magic_name__ :List[Any] = defaultdict(__lowerCAmelCase )
__magic_name__ :int = self.config.model_type
__magic_name__ :Union[str, Any] = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
__magic_name__ :dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__magic_name__ :List[Any] = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
__magic_name__ :Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__magic_name__ :Optional[Any] = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__magic_name__ :Tuple = get_git_info()['''repo_sha''']
__magic_name__ :Any = hparams.num_workers
__magic_name__ :List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __lowerCAmelCase ):
__magic_name__ :Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__magic_name__ :Union[str, Any] = self.decoder_start_token_id
__magic_name__ :Any = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
__magic_name__ :List[str] = False
__magic_name__ :List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__magic_name__ :Tuple = self.hparams.eval_max_gen_length
else:
__magic_name__ :Union[str, Any] = self.model.config.max_length
__magic_name__ :List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(__lowerCAmelCase , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
__magic_name__ :Tuple = True
return readable_batch
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.model(__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = self.tokenizer.batch_decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return lmap(str.strip , __lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.tokenizer.pad_token_id
__magic_name__ , __magic_name__ :Optional[int] = batch['''input_ids'''], batch['''attention_mask''']
__magic_name__ :Dict = batch['''labels''']
if isinstance(self.model , __lowerCAmelCase ):
__magic_name__ :List[str] = self.model._shift_right(__lowerCAmelCase )
else:
__magic_name__ :Optional[Any] = shift_tokens_right(__lowerCAmelCase , __lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__magic_name__ :List[str] = decoder_input_ids
self.save_readable_batch(__lowerCAmelCase )
__magic_name__ :List[Any] = self(__lowerCAmelCase , attention_mask=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , use_cache=__lowerCAmelCase )
__magic_name__ :int = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__magic_name__ :Union[str, Any] = nn.CrossEntropyLoss(ignore_index=__lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
__magic_name__ :Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__magic_name__ :List[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
__magic_name__ , __magic_name__ :Tuple = label_smoothed_nll_loss(
__lowerCAmelCase , __lowerCAmelCase , self.hparams.label_smoothing , ignore_index=__lowerCAmelCase )
return (loss,)
@property
def A ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self._step(__lowerCAmelCase )
__magic_name__ :Any = dict(zip(self.loss_names , __lowerCAmelCase ) )
# tokens per batch
__magic_name__ :Dict = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
__magic_name__ :List[Any] = batch['''input_ids'''].shape[0]
__magic_name__ :List[str] = batch['''input_ids'''].eq(self.pad ).sum()
__magic_name__ :str = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return self._generative_step(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase="val" ):
"""simple docstring"""
self.step_count += 1
__magic_name__ :Dict = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__magic_name__ :Dict = losses['''loss''']
__magic_name__ :Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
__magic_name__ :Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__magic_name__ :torch.FloatTensor = torch.tensor(__lowerCAmelCase ).type_as(__lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__lowerCAmelCase )
__magic_name__ :Optional[int] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
__magic_name__ :Tuple = self.step_count
self.metrics[prefix].append(__lowerCAmelCase ) # callback writes this to self.metrics_save_path
__magic_name__ :Dict = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return calculate_rouge(__lowerCAmelCase , __lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__magic_name__ :Any = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__magic_name__ :int = (time.time() - ta) / batch['''input_ids'''].shape[0]
__magic_name__ :List[str] = self.ids_to_clean_text(__lowerCAmelCase )
__magic_name__ :List[str] = self.ids_to_clean_text(batch['''labels'''] )
__magic_name__ :List[Any] = self._step(__lowerCAmelCase )
__magic_name__ :str = dict(zip(self.loss_names , __lowerCAmelCase ) )
__magic_name__ :Dict = self.calc_generative_metrics(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Dict = np.mean(lmap(__lowerCAmelCase , __lowerCAmelCase ) )
base_metrics.update(gen_time=__lowerCAmelCase , gen_len=__lowerCAmelCase , preds=__lowerCAmelCase , target=__lowerCAmelCase , **__lowerCAmelCase )
return base_metrics
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return self._generative_step(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.validation_epoch_end(__lowerCAmelCase , prefix='''test''' )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.n_obs[type_path]
__magic_name__ :Optional[int] = self.target_lens[type_path]
__magic_name__ :List[Any] = self.dataset_class(
self.tokenizer , type_path=__lowerCAmelCase , n_obs=__lowerCAmelCase , max_target_length=__lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.get_dataset(__lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__magic_name__ :Any = dataset.make_sortish_sampler(__lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__magic_name__ :Optional[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__lowerCAmelCase )
return dataloader
def A ( self ):
"""simple docstring"""
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def A ( self ):
"""simple docstring"""
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def A ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
add_generic_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
'''--max_source_length''' , default=1_0_2_4 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=5_6 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=1_4_2 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=1_4_2 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__lowerCAmelCase )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__lowerCAmelCase )
parser.add_argument('''--max_tokens_per_batch''' , type=__lowerCAmelCase , default=__lowerCAmelCase )
parser.add_argument('''--logger_name''' , type=__lowerCAmelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=__lowerCAmelCase , default=5_0_0 , required=__lowerCAmelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=__lowerCAmelCase , default='''summarization''' , required=__lowerCAmelCase , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=__lowerCAmelCase , default=0.0 , required=__lowerCAmelCase )
parser.add_argument('''--src_lang''' , type=__lowerCAmelCase , default='''''' , required=__lowerCAmelCase )
parser.add_argument('''--tgt_lang''' , type=__lowerCAmelCase , default='''''' , required=__lowerCAmelCase )
parser.add_argument('''--eval_beams''' , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase )
parser.add_argument(
'''--val_metric''' , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=__lowerCAmelCase , default=1 , required=__lowerCAmelCase , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''translation'''
a__ = ['''loss''']
a__ = ['''bleu''']
a__ = '''bleu'''
def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ :Any = hparams.src_lang
__magic_name__ :Union[str, Any] = hparams.tgt_lang
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return calculate_bleu(__lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( snake_case, snake_case=None ):
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=snake_case )
check_output_dir(snake_case, expected_items=3 )
if model is None:
if "summarization" in args.task:
__magic_name__ :SummarizationModule = SummarizationModule(snake_case )
else:
__magic_name__ :SummarizationModule = TranslationModule(snake_case )
__magic_name__ :Dict = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
__magic_name__ :Optional[int] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__magic_name__ :List[str] = os.environ.get('''WANDB_PROJECT''', snake_case )
__magic_name__ :Tuple = WandbLogger(name=model.output_dir.name, project=snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__magic_name__ :Optional[Any] = WandbLogger(name=model.output_dir.name, project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
__magic_name__ :str = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
__magic_name__ :str = False
__magic_name__ :Dict = args.val_metric == '''loss'''
__magic_name__ :pl.Trainer = generic_train(
snake_case, snake_case, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, snake_case ), early_stopping_callback=snake_case, logger=snake_case, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
__magic_name__ :Union[str, Any] = ''''''
__magic_name__ :Tuple = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=snake_case ) )
if checkpoints:
__magic_name__ :Dict = checkpoints[-1]
__magic_name__ :Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE__ : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
main(args)
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
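
# Sketch of what the lazy module buys (illustrative, not from the source):
#
#   import transformers.models.tapas as tapas  # cheap: torch/tf not imported yet
#   tapas.TapasConfig                          # first attribute access triggers the real import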
| 56 | 0 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively (Euclid's algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative variant; more memory efficient because it creates no extra stack frames."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two integers from stdin and print their GCD both ways."""
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}'
        )
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')
if __name__ == "__main__":
main()
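    # Sanity checks on both implementations (values chosen here for
    # illustration, not from the source):
    assert greatest_common_divisor(121, 11) == 11 == gcd_by_iterative(121, 11)
    assert greatest_common_divisor(0, 8) == 8 == gcd_by_iterative(0, 8)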
| 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Find the rank of a matrix via Gaussian elimination (modifies ``matrix`` in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            # (note: rebinding a for-loop variable has no effect on the
            # iteration in Python, so this line is effectively a no-op)
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
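    # Illustrative example (matrix values assumed, not from the source): the
    # second row is twice the first, so only two rows are independent.
    print(rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]))  # 2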
| 2 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , A_="</s>" , A_="<unk>" , A_="<pad>" , A_=125 , A_=None , **A_ , )-> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase = [F'''<extra_id_{i}>''' for i in range(A_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase = len(set(filter(lambda A_ : bool('extra_id' in str(A_ ) ) , A_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
super().__init__(
eos_token=A_ , unk_token=A_ , pad_token=A_ , extra_ids=A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase = extra_ids
UpperCamelCase = 2**8 # utf is 8 bits
# define special tokens dict
UpperCamelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCamelCase = len(self.special_tokens_encoder )
UpperCamelCase = len(A_ )
for i, token in enumerate(A_ ):
UpperCamelCase = self.vocab_size + i - n
UpperCamelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase_ ( self , A_ )-> List[int]:
'''simple docstring'''
if len(A_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase_ ( self , A_ , A_ = None )-> List[int]:
'''simple docstring'''
UpperCamelCase = self._add_eos_if_not_present(A_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase = self._add_eos_if_not_present(A_ )
return token_ids_a + token_ids_a
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = [chr(A_ ) for i in text.encode('utf-8' )]
return tokens
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if token in self.special_tokens_encoder:
UpperCamelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCamelCase = self.added_tokens_encoder[token]
elif len(A_ ) != 1:
UpperCamelCase = self.unk_token_id
else:
UpperCamelCase = ord(A_ ) + self._num_special_tokens
return token_id
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
if index in self.special_tokens_decoder:
UpperCamelCase = self.special_tokens_decoder[index]
else:
UpperCamelCase = chr(index - self._num_special_tokens )
return token
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
UpperCamelCase = B''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCamelCase = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
                UpperCamelCase = self.added_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCamelCase = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCamelCase = token.encode('utf-8' )
else:
UpperCamelCase = bytes([ord(A_ )] )
bstring += tok_string
UpperCamelCase = bstring.decode('utf-8' , errors='ignore' )
return string
def UpperCAmelCase_ ( self , A_ , A_ = None )-> Tuple[str]:
'''simple docstring'''
return ()
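# Illustrative sketch (not part of the tokenizer above): byte-level tokenization
# maps each UTF-8 byte to an id shifted past the special tokens. The offset of 3
# assumed here matches the pad/eos/unk layout defined in __init__ above.
def _byte_ids_sketch(text, num_special_tokens=3):
    """Encode text as UTF-8 byte values shifted past the special-token ids."""
    return [byte + num_special_tokens for byte in text.encode('utf-8')]


def _byte_decode_sketch(ids, num_special_tokens=3):
    """Invert _byte_ids_sketch, skipping ids that fall in the special-token range."""
    payload = bytes(i - num_special_tokens for i in ids if i >= num_special_tokens)
    return payload.decode('utf-8', errors='ignore')


assert _byte_decode_sketch(_byte_ids_sketch('héllo')) == 'héllo'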
| 3 |
"""
Project Euler problem 77: find the first value that can be written as the sum
of primes in over five thousand different ways.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the prime products of every prime partition of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
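# Worked example (uses the names fixed above): the three prime partitions of 7
# are 7, 5 + 2, and 3 + 2 + 2, whose products are 7, 10, and 12 respectively.
if __name__ == "__main__":
    assert partition(7) == {7, 10, 12}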
| 56 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCamelCase : Optional[int] = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
lowerCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
lowerCAmelCase = ''
with open(_UpperCAmelCase ) as f:
lowerCAmelCase = f.readline()
lowerCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
lowerCAmelCase = [
word
for word in [sum(ord(_UpperCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
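# Quick check of the word-value rule above (a sketch, not from the source file):
# "SKY" -> 19 + 11 + 25 = 55, the 10th triangular number, so "SKY" is a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS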
| 4 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
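# Usage sketch (assumes the standard `accelerate` console entry point wires the
# subparser as above; command shown for illustration only):
#   accelerate config --config_file ./my_config.yaml
# which is roughly equivalent to:
#   parser = config_command_parser()
#   config_command(parser.parse_args(["--config_file", "./my_config.yaml"]))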
| 56 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
_lowercase : Optional[int] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline ( self , model , tokenizer , processor ):
        """simple docstring"""
        example_video_filepath = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    def run_pipeline_test ( self , video_classifier , examples ):
        """simple docstring"""
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {"""score""": ANY(float ), """label""": ANY(str )},
                    {"""score""": ANY(float ), """label""": ANY(str )},
                ] , )
@require_torch
    def test_small_model_pt ( self ):
"""simple docstring"""
_lowerCAmelCase = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
_lowerCAmelCase = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
_lowerCAmelCase = pipeline(
"""video-classification""" , model=_lowercase , feature_extractor=_lowercase , frame_sampling_rate=4 )
_lowerCAmelCase = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
_lowerCAmelCase = video_classifier(_lowercase , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
_lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
    def test_small_model_tf ( self ):
"""simple docstring"""
pass
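# Minimal usage sketch (assumes network access and a decord install; the model
# name is the one exercised in the test above, not a production checkpoint):
#   from transformers import pipeline
#   classifier = pipeline(
#       "video-classification",
#       model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#   )
#   classifier("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, ...]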
| 5 |
"""
Project Euler problem 46: Goldbach's other conjecture. Find the smallest odd
composite that cannot be written as the sum of a prime and twice a square.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites without a prime + 2*i*i decomposition."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that refutes Goldbach's other conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 56 | 0 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_lowerCamelCase = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but it also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_lowerCamelCase = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _snake_case ( self :Union[str, Any] , __A :List[Any] , __A :Optional[Any] , __A :int=False ) -> int:
"""simple docstring"""
if return_pvalue:
SCREAMING_SNAKE_CASE__ = pearsonr(__A , __A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__A , __A )[0] )}
| 6 |
"""
Divide a contiguous range of bytes into the requested number of partitions.
"""
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Return inclusive byte ranges, one per partition; the last absorbs the remainder."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")

    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
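# Worked example (uses the signature fixed above): 100 bytes over 4 partitions
# gives three 25-byte ranges plus a final range that runs to the end.
if __name__ == "__main__":
    assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]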
| 56 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = SpeechTaTokenizer
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[int] = True
def lowerCAmelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
_A = 'this is a test'
_A = 'this is a test'
return input_text, output_text
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : str=5 ):
_A , _A = self.get_input_output_texts(_UpperCAmelCase )
_A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = '<pad>'
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(_UpperCAmelCase ) , 81 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase_ ( self : Any ):
_A = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_A = tokenizer.add_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) )
_A = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_A = tokenizer.add_special_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) )
_A = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Dict ):
_A = self.get_tokenizer()
_A = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
_A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
# Use custom sequence because this tokenizer does not handle numbers.
_A = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
_A = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_UpperCAmelCase , )
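# Sketch of the character-level convention exercised above (fixture behavior,
# not an API guarantee): tokens are single characters, with SPIECE_UNDERLINE
# marking word boundaries, e.g.
#   tokenizer.tokenize("This is a test")
#   -> [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", ...]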
| 7 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length: iteration stops with probability p_stop after
    # each element, or after max_length elements at the latest.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]:
__snake_case = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
__snake_case = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of batches
        # is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of batches
        # is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but the number of batches
        # is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size and the number of batches
        # is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
random.seed(SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
__snake_case = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
__snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
__snake_case = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
__snake_case = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
def a ( self : Dict ) -> Tuple:
__snake_case = 42
__snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
__snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
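# Standalone illustration (mirrors the expectations asserted above): two shards
# over range(24) with batch_size=3 pick up alternating batches of the sampler.
if __name__ == "__main__":
    base_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(base_sampler, 2, rank) for rank in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]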
| 56 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : int = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : List[Any] = parent
__A : Optional[Any] = batch_size
__A : Tuple = num_channels
__A : str = min_resolution
__A : List[Any] = max_resolution
__A : int = do_resize
__A : Any = size
__A : int = do_normalize
__A : Union[str, Any] = image_mean
__A : List[str] = image_std
__A : Dict = do_rescale
__A : List[str] = rescale_factor
__A : Union[str, Any] = do_pad
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
if not batched:
__A : str = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image):
__A ,__A : Tuple = image.size
else:
__A ,__A : int = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size['shortest_edge'] * h / w)
__A : int = self.size['shortest_edge']
elif w > h:
__A : List[Any] = self.size['shortest_edge']
__A : Dict = int(self.size['shortest_edge'] * w / h)
else:
__A : Tuple = self.size['shortest_edge']
__A : List[Any] = self.size['shortest_edge']
else:
__A : List[Any] = []
for image in image_inputs:
__A ,__A : Union[str, Any] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__A : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[0])[0]
__A : Optional[int] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = YolosImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean'))
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
__A : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A ,__A : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
__A : List[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
__A : Dict = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Any = image_processing_a.pad(_UpperCAmelCase , return_tensors='pt')
__A : Optional[int] = image_processing_a(_UpperCAmelCase , return_tensors='pt')
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
__A : int = json.loads(f.read())
__A : Any = {'image_id': 3_9769, 'annotations': target}
# encode them
__A : int = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
__A : str = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : int = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify orig_size
__A : Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : List[str] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
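    # Note: the box values asserted above follow the DETR-style target format --
    # COCO's absolute [x, y, w, h] boxes converted to normalized
    # [center_x, center_y, w, h] (e.g. center_x = (x + w / 2) / image_width),
    # so entries such as 0.5503 are fractions of the 640x480 source image,
    # not pixel coordinates.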
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
__A : Any = json.loads(f.read())
__A : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__A : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__A : List[Any] = YolosImageProcessor(format='coco_panoptic')
__A : Optional[int] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : int = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Union[str, Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify masks
__A : Tuple = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase)
# verify orig_size
__A : str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
| 8 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
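    # Note: the fixture above mirrors the on-disk layout MarianTokenizer expects --
    # vocab.json (the shared id map), tokenizer_config.json, and a SentencePiece
    # model for each side (source.spm / target.spm); save_json and copyfile simply
    # materialize those files under tmpdirname.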
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : str ):
"""simple docstring"""
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
A__ = 'xvjiarui/stable-diffusion-2-inpainting'
A__ , A__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(_snake_case , safety_checker=_snake_case )
A__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = num_samples * [init_image]
A__ = num_samples * [mask_image]
A__ , A__ , A__ = pipeline.prepare_inputs(_snake_case , _snake_case , _snake_case )
# shard inputs and rng
A__ = replicate(_snake_case )
A__ = jax.random.split(_snake_case , jax.device_count() )
A__ = shard(_snake_case )
A__ = shard(_snake_case )
A__ = shard(_snake_case )
A__ = pipeline(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , jit=_snake_case )
A__ = output.images.reshape(_snake_case , 5_12 , 5_12 , 3 )
A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A__ = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
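        # Note: the replicate/shard calls above are the standard jax.pmap
        # data-parallel prep -- params are replicated to every device, the PRNG
        # key is split once per device, and each input batch is reshaped to add
        # a leading device axis before the jitted pipeline call.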
| 9 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
if len(lowercase__ ) != 3_2:
raise ValueError('Input must be of length 32' )
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _a (lowercase__ : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '08x' )[-8:]
__snake_case = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = B''
for char in message:
bit_string += format(lowercase__ , '08b' ).encode('utf-8' )
__snake_case = format(len(lowercase__ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(lowercase__ ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def _a (lowercase__ : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(lowercase__ ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(lowercase__ ) , 5_1_2 ):
__snake_case = bit_string[pos : pos + 5_1_2]
__snake_case = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def _a (lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
__snake_case = format(lowercase__ , '032b' )
__snake_case = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(lowercase__ , 2 )
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
return (a + b) % 2**3_2
def _a (lowercase__ : int , lowercase__ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def _a (lowercase__ : bytes ) -> bytes:
"""simple docstring"""
__snake_case = preprocess(lowercase__ )
__snake_case = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
__snake_case = 0x6_7_4_5_2_3_0_1
__snake_case = 0xE_F_C_D_A_B_8_9
__snake_case = 0x9_8_B_A_D_C_F_E
__snake_case = 0x1_0_3_2_5_4_7_6
    __snake_case = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,  # round 1
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,  # round 2
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,  # round 3
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,  # round 4
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowercase__ ):
__snake_case = aa
__snake_case = ba
__snake_case = ca
__snake_case = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case = d ^ (b & (c ^ d))
__snake_case = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case = c ^ (d & (b ^ c))
__snake_case = (5 * i + 1) % 1_6
elif i <= 4_7:
__snake_case = b ^ c ^ d
__snake_case = (3 * i + 5) % 1_6
else:
__snake_case = c ^ (b | not_aa(lowercase__ ))
__snake_case = (7 * i) % 1_6
__snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2
__snake_case = d
__snake_case = c
__snake_case = b
__snake_case = sum_aa(lowercase__ , left_rotate_aa(lowercase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = sum_aa(lowercase__ , lowercase__ )
__snake_case = reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
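    # A minimal sanity sketch of the two primitives the helpers above implement:
    # 32-bit left rotation and little-endian word packing. The clean names below
    # are illustrative (the functions in this module are obfuscated), and hashlib
    # is only used as a reference oracle.
    import hashlib

    def _left_rotate(value: int, shift: int) -> int:
        # rotate a 32-bit value left by `shift` bits
        return ((value << shift) | (value >> (32 - shift))) & 0xFFFFFFFF

    assert _left_rotate(0x12345678, 8) == 0x34567812
    # int.to_bytes(..., "little") is the idiomatic form of the manual byte
    # reversal done by the little-endian helpers above
    assert (0x67452301).to_bytes(4, "little").hex() == "01234567"
    # well-known reference digest for the empty message
    assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"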
| 56 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self : Tuple , _A : str , _A : List[str]=13 , _A : Optional[int]=7 , _A : List[Any]=True , _A : int=True , _A : int=True , _A : Tuple=True , _A : Tuple=99 , _A : Tuple=32 , _A : List[str]=5 , _A : Any=4 , _A : Any=37 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : List[str]=0.1 , _A : int=512 , _A : List[str]=16 , _A : str=2 , _A : Dict=0.02 , _A : str=4 , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = True
UpperCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=_A )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
| 10 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__snake_case = quote(lowercase__ )
return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' , revision=lowercase__ )
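if __name__ == "__main__":
    # The helper above (obfuscated, so restated here with illustrative names)
    # builds a dataset "resolve" URL; the quote() branch only matters on
    # huggingface_hub < 0.11.0, which did not url-encode the file path itself.
    def _demo_hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
        if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
            path = quote(path)
        return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)

    # hypothetical repo and file names:
    print(_demo_hf_hub_url('user/dataset', 'data/train file.json'))
    # -> https://huggingface.co/datasets/user/dataset/resolve/main/data/train%20file.json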
| 56 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowerCAmelCase (__A = 8):
"""simple docstring"""
_a = ascii_letters + digits + punctuation
return "".join(secrets.choice(__A) for _ in range(__A))
def lowerCAmelCase (__A , __A):
"""simple docstring"""
i -= len(__A)
_a = i // 3
_a = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_a = (
chars_incl
+ random(__A , quotient + remainder)
+ random(__A , __A)
+ random(__A , __A)
)
_a = list(__A)
shuffle(__A)
return "".join(__A)
# random is a generalised function for letters, characters and numbers
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return "".join(secrets.choice(__A) for _ in range(__A))
def lowerCAmelCase (__A , __A):
"""simple docstring"""
pass # Put your code here...
def lowerCAmelCase (__A , __A):
"""simple docstring"""
pass # Put your code here...
def lowerCAmelCase (__A , __A):
"""simple docstring"""
pass # Put your code here...
def lowerCAmelCase (__A , __A = 8):
"""simple docstring"""
if len(__A) < min_length:
        # The password must be at least min_length (default 8) characters long
return False
_a = any(char in ascii_uppercase for char in password)
_a = any(char in ascii_lowercase for char in password)
_a = any(char in digits for char in password)
_a = any(char in punctuation for char in password)
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def lowerCAmelCase ():
"""simple docstring"""
_a = int(input('''Please indicate the max length of your password: ''').strip())
_a = input(
'''Please indicate the characters that must be in your password: ''').strip()
print('''Password generated:''' , password_generator(__A))
print(
'''Alternative Password generated:''' , alternative_password_generator(__A , __A) , )
print('''[If you are thinking of using this passsword, You better save it.]''')
if __name__ == "__main__":
main()
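    # A compact restatement of the strength check above with clean names (the
    # functions in this module are obfuscated); same rule: minimum length plus
    # at least one character from each of the four classes.
    def _is_strong(password: str, min_length: int = 8) -> bool:
        return len(password) >= min_length and all(
            any(char in charset for char in password)
            for charset in (ascii_uppercase, ascii_lowercase, digits, punctuation)
        )

    assert _is_strong('Aa1!aaaa') and not _is_strong('password')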
| 11 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
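# A de-obfuscated sketch of the adapter above: a LoRA-style wrapper that keeps the
# frozen base Linear and adds a trainable low-rank bottleneck whose output is summed
# with the base output. Class and variable names here are illustrative only.
if is_torch_available():
    class _LoRASketch(nn.Module):
        def __init__(self, base: nn.Linear, rank: int = 16):
            super().__init__()
            self.base = base
            self.adapter = nn.Sequential(
                nn.Linear(base.in_features, rank, bias=False),
                nn.Linear(rank, base.out_features, bias=False),
            )
            # zero-init the up-projection so the wrapped layer starts as a no-op delta
            nn.init.zeros_(self.adapter[1].weight)

        def forward(self, x):
            return self.base(x) + self.adapter(x)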
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
    # We keep the constants inside the init function and the model loading inside the setUp function.
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore we use only bloom-1b3 here to test our module.
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
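    # Note: Step 1 above follows the usual k-bit fine-tuning recipe -- freeze the
    # quantized base weights and cast 1-D parameters (layer norms, biases) to fp32,
    # since keeping them in half precision tends to destabilize training; only the
    # injected adapters then receive gradients.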
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCamelCase__ : List[Any] = pytest.mark.integration
lowerCamelCase__ : Dict = {"""comet"""}
lowerCamelCase__ : Optional[int] = importlib.util.find_spec("""fairseq""") is not None
lowerCamelCase__ : Tuple = {"""code_eval"""}
lowerCamelCase__ : str = os.name == """nt"""
lowerCamelCase__ : int = {"""bertscore""", """frugalscore""", """perplexity"""}
lowerCamelCase__ : Tuple = importlib.util.find_spec("""transformers""") is not None
def UpperCamelCase ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
lowercase__ : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@local
class _snake_case ( parameterized.TestCase ):
__lowerCAmelCase : Any = {}
__lowerCAmelCase : str = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = """[...]"""
lowercase__ : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path)
lowercase__ : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE_)
# check parameters
lowercase__ : Any = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(SCREAMING_SNAKE_CASE_ , metric_module.__name__):
with self.use_local_metrics():
try:
lowercase__ : int = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = """[...]"""
lowercase__ : Union[str, Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path)
# run doctest
with self.use_local_metrics():
lowercase__ : Union[str, Any] = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE_):
yield
else:
yield
@contextmanager
def lowercase__ ( self):
'''simple docstring'''
def load_local_metric(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
return load_metric(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_) , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
with patch("""datasets.load_metric""") as mock_load_metric:
lowercase__ : Union[str, Any] = load_local_metric
yield
@classmethod
def lowercase__ ( cls , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
def wrapper(SCREAMING_SNAKE_CASE_):
lowercase__ : Any = contextmanager(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = patcher
return patcher
return wrapper
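# The registration pattern above in miniature -- a class-level dict maps a metric
# name to a contextmanager that patches its expensive calls, and the decorator form
# lets each patcher be declared next to its mocks. Names below are illustrative.
_DEMO_PATCHERS: dict = {}

def _demo_register(name):
    def deco(fn):
        # wrap fn as a contextmanager and file it under `name`
        _DEMO_PATCHERS[name] = contextmanager(fn)
        return fn
    return deco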
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class _snake_case ( UpperCAmelCase_ ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert len(input_dict["""input_ids"""]) == 2
return np.array([1.0_3, 1.0_4])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
lowercase__ : str = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
import torch
def bert_cos_score_idf(lowercase_ , lowercase_ , *lowercase_ , **lowercase_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase_ ) )
    # mock get_model, which is supposed to download a bert model
    # mock bert_cos_score_idf, which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
lowercase__ : int = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCamelCase ( lowercase_ ) -> Dict:
'''simple docstring'''
def load_from_checkpoint(lowercase_ ):
class _snake_case :
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert len(SCREAMING_SNAKE_CASE_) == 2
lowercase__ : Optional[Any] = [0.1_9, 0.9_2]
return scores, sum(SCREAMING_SNAKE_CASE_) / len(SCREAMING_SNAKE_CASE_)
return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download a model checkpoint
with patch("""comet.download_model""" ) as mock_download_model:
lowercase__ : Union[str, Any] = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
lowercase__ : List[Any] = load_from_checkpoint
yield
def UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
lowercase__ : Optional[Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
lowercase__ : int = """ERROR"""
lowercase__ : Tuple = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(lowercase_ , match=re.escape(lowercase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowercase_ )
| 12 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure the decoder kwargs passed to from_pretrained are applied
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
        # note: the pool should be instantiated *after* Wav2Vec2ProcessorWithLM,
        # otherwise the LM won't be available to the pool's sub-processes
        # (a call-shape sketch follows this method).
        # manual logic used to allow a parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
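    # Sketch of the pool-ordering constraint noted in the test above (illustrative
    # call shapes, not suite API):
    #   processor = Wav2Vec2ProcessorWithLM.from_pretrained(checkpoint)  # load the LM first
    #   with get_context('fork').Pool() as pool:                         # then fork workers
    #       processor.batch_decode(logits, pool)                         # workers inherit the LM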
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
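        # inputs_to_logits_ratio is the number of raw audio samples that collapse
        # into one logit frame (typically 320 for the wav2vec2 base architecture);
        # dividing by the 16 kHz sampling rate gives seconds per frame, i.e. 0.02 s.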
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
        __snake_case = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        __snake_case = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.01 ) )
| 56 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : List[str] = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 'vit'
def __init__( self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Optional[int] = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Dict = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : List[str] = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : List[Any] = qkv_bias
__lowerCamelCase : Union[str, Any] = encoder_stride
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = version.parse('1.11' )
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase_ ( self ) -> float:
return 1E-4
| 13 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
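# The recursion unrolls exponent times: power(2, 4) -> 2 * power(2, 3) -> ...
# -> 2 * 2 * 2 * 2 * 1 == 16; negative exponents are handled by the caller
# below, which inverts the result of power(base, abs(exponent)).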
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __UpperCAmelCase ( __a : int ,__a : Dict=1 ) -> str:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=0 ) -> Optional[Any]:
"""simple docstring"""
_a : Dict = []
for old_item in old_list:
_a : Optional[Any] = old_item.replace('''in_layers.0''' ,'''norm1''' )
_a : int = new_item.replace('''in_layers.2''' ,'''conv1''' )
_a : List[Any] = new_item.replace('''out_layers.0''' ,'''norm2''' )
_a : int = new_item.replace('''out_layers.3''' ,'''conv2''' )
_a : Optional[Any] = new_item.replace('''emb_layers.1''' ,'''time_emb_proj''' )
_a : int = new_item.replace('''skip_connection''' ,'''conv_shortcut''' )
_a : Optional[int] = shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __UpperCAmelCase ( __a : str ,__a : Tuple=0 ) -> Dict:
"""simple docstring"""
_a : int = []
for old_item in old_list:
_a : Union[str, Any] = old_item
_a : str = new_item.replace('''norm.weight''' ,'''group_norm.weight''' )
_a : str = new_item.replace('''norm.bias''' ,'''group_norm.bias''' )
_a : Optional[int] = new_item.replace('''proj_out.weight''' ,'''proj_attn.weight''' )
_a : List[str] = new_item.replace('''proj_out.bias''' ,'''proj_attn.bias''' )
_a : Union[str, Any] = shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : Dict ,__a : List[str]=None ,__a : Any=None ,__a : List[str]=None ) -> Any:
"""simple docstring"""
assert isinstance(__a ,__a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Any = old_checkpoint[path]
_a : Any = old_tensor.shape[0] // 3
_a : str = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : Tuple = old_tensor.shape[0] // config['''num_head_channels'''] // 3
_a : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Optional[Any] = old_tensor.split(channels // num_heads ,dim=1 )
_a : List[str] = query.reshape(__a )
_a : List[str] = key.reshape(__a )
_a : Dict = value.reshape(__a )
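            # The LDM checkpoint stores attention q/k/v fused in a single tensor;
            # the reshape/split above carves it into three equal per-head chunks
            # before they are written back under separate query/key/value keys.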
for path in paths:
_a : Optional[int] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Optional[int] = new_path.replace('''middle_block.0''' ,'''mid_block.resnets.0''' )
_a : List[Any] = new_path.replace('''middle_block.1''' ,'''mid_block.attentions.0''' )
_a : Tuple = new_path.replace('''middle_block.2''' ,'''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : Dict = new_path.replace(replacement['''old'''] ,replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : Any = old_checkpoint[path['''old''']][:, :, 0]
else:
_a : str = old_checkpoint[path['''old''']]
def __UpperCAmelCase ( __a : List[Any] ,__a : Dict ) -> Tuple:
"""simple docstring"""
_a : List[Any] = {}
_a : Tuple = checkpoint['''time_embed.0.weight''']
_a : str = checkpoint['''time_embed.0.bias''']
_a : Dict = checkpoint['''time_embed.2.weight''']
_a : int = checkpoint['''time_embed.2.bias''']
_a : Dict = checkpoint['''input_blocks.0.0.weight''']
_a : List[Any] = checkpoint['''input_blocks.0.0.bias''']
_a : Tuple = checkpoint['''out.0.weight''']
_a : str = checkpoint['''out.0.bias''']
_a : str = checkpoint['''out.2.weight''']
_a : int = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_a : List[Any] = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
_a : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
_a : Tuple = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_a : Optional[Any] = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
for i in range(1 ,__a ):
_a : Any = (i - 1) // (config['''num_res_blocks'''] + 1)
_a : List[Any] = (i - 1) % (config['''num_res_blocks'''] + 1)
_a : str = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
_a : List[str] = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : Optional[Any] = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
_a : Any = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
_a : List[Any] = renew_resnet_paths(__a )
_a : Any = {'''old''': F"""input_blocks.{i}.0""", '''new''': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[int] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path, resnet_op] ,config=__a )
if len(__a ):
_a : Union[str, Any] = renew_attention_paths(__a )
_a : List[Any] = {
'''old''': F"""input_blocks.{i}.1""",
'''new''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Union[str, Any] = {
F"""input_blocks.{i}.1.qkv.bias""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=__a ,config=__a ,)
_a : int = middle_blocks[0]
_a : str = middle_blocks[1]
_a : int = middle_blocks[2]
_a : int = renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : Dict = renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : str = renew_attention_paths(__a )
_a : Dict = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__a ,__a ,__a ,attention_paths_to_split=__a ,config=__a )
for i in range(__a ):
_a : str = i // (config['''num_res_blocks'''] + 1)
_a : Union[str, Any] = i % (config['''num_res_blocks'''] + 1)
_a : List[Any] = [shave_segments(__a ,2 ) for name in output_blocks[i]]
_a : str = {}
for layer in output_block_layers:
_a , _a : Optional[Any] = layer.split('''.''' )[0], shave_segments(__a ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
_a : int = [layer_name]
if len(__a ) > 1:
_a : Dict = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
_a : List[Any] = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
_a : Tuple = renew_resnet_paths(__a )
_a : Tuple = renew_resnet_paths(__a )
_a : str = {'''old''': F"""output_blocks.{i}.0""", '''new''': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(__a ,__a ,__a ,additional_replacements=[meta_path] ,config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_a : List[str] = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
_a : Union[str, Any] = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
_a : Tuple = []
if len(__a ):
_a : Optional[int] = renew_attention_paths(__a )
_a : str = {
'''old''': F"""output_blocks.{i}.1""",
'''new''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Tuple = {
F"""output_blocks.{i}.1.qkv.bias""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None ,config=__a ,)
else:
_a : str = renew_resnet_paths(__a ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : Any = '''.'''.join(['''output_blocks''', str(__a ), path['''old''']] )
_a : Tuple = '''.'''.join(['''up_blocks''', str(__a ), '''resnets''', str(__a ), path['''new''']] )
_a : Optional[int] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
a__ = parser.parse_args()
a__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a__ = json.loads(f.read())
a__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
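    # If the checkpoint directory does not also ship a scheduler config and a
    # VQ-VAE, the full LDMPipeline cannot be assembled, so only the converted
    # UNet weights are saved as a fallback.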
| 14 |
'''simple docstring'''
import math
from collections.abc import Callable
def _a (lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ) -> float:
"""simple docstring"""
__snake_case = xa
__snake_case = xa
while True:
if x_n == x_na or function(lowercase__ ) == function(lowercase__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__snake_case = x_na - (
function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 1_0**-5:
return x_na
__snake_case = x_na
__snake_case = x_na
def _a (lowercase__ : float ) -> float:
"""simple docstring"""
return math.pow(lowercase__ , 3 ) - (2 * x) - 5
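# f(x) = x**3 - 2*x - 5 is the classic secant/Newton demo polynomial; its only
# real root is x ≈ 2.0945515, which the secant iteration above is intended to
# converge to from the starting pair (3, 3.5).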
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 0 |
def UpperCamelCase ( __magic_name__ : list ) -> list:
"""simple docstring"""
def merge(__magic_name__ : list , __magic_name__ : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__magic_name__ ) <= 1:
return collection
lowercase__ = len(__magic_name__ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
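# The nested merge() is lazy: _merge is a generator that repeatedly pops the
# smaller head of the two sorted halves, then `yield from` drains whichever
# half still has elements, e.g. merge([1, 3], [2]) -> [1, 2, 3].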
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = input('Enter numbers separated by a comma:\n').strip()
A : str = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 15 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
import numpy as np
def __a ( A__ : np.ndarray ):
return 1 / (1 + np.exp(-vector ))
def __a ( A__ : np.ndarray ):
return vector * sigmoid(A__ )
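# The two helpers implement sigmoid(x) = 1 / (1 + e**-x) and the SiLU (swish)
# activation silu(x) = x * sigmoid(x), applied element-wise to a numpy array;
# e.g. silu(0) == 0.0 and silu(1) ≈ 0.731.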
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
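# Worked example: ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 == 9.
# The division branch emulates C-style truncation toward zero: Python's //
# floors, so one is added back when the operands have opposite signs and the
# remainder is non-zero, e.g. a dividend of -7 and divisor 2 yield -3, not -4.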
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
from math import factorial
def __SCREAMING_SNAKE_CASE ( a__ : int = 100 ) -> int:
return sum(int(a__ ) for x in str(factorial(a__ ) ) )
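# Project Euler problem 20: the digits of 100! (a 158-digit number) sum to 648,
# so solution() with its default argument is expected to return 648.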
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 17 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase__ : int , lowercase__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case = update_area_of_max_square(lowercase__ , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
return sub_problem_sol
else:
return 0
__snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
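# The plain recursion above revisits the same (row, col) subproblem along three
# branches, giving exponential time; the memoised variant below caches each
# answer in dp_array so every cell is solved once, i.e. O(rows * cols) time.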
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case = update_area_of_max_square_using_dp_array(lowercase__ , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , lowercase__ , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
__snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case = [0]
__snake_case = [[-1] * cols for _ in range(lowercase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase__ )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [[0] * (cols + 1) for _ in range(rows + 1 )]
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = dp_array[row][col + 1]
__snake_case = dp_array[row + 1][col + 1]
__snake_case = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(dp_array[row][col] , lowercase__ )
else:
__snake_case = 0
return largest_square_area
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [0] * (cols + 1)
__snake_case = [0] * (cols + 1)
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = current_row[col + 1]
__snake_case = next_row[col + 1]
__snake_case = next_row[col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(current_row[col] , lowercase__ )
else:
__snake_case = 0
__snake_case = current_row
return largest_square_area
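# The space-optimised pass keeps only two rows of the DP table (current_row /
# next_row), cutting memory from O(rows * cols) down to O(cols); the 2x2
# all-ones demo below reports a largest square of side 2.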
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def __a(SCREAMING_SNAKE_CASE_ : List[Any] ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class lowerCAmelCase_ :
__lowerCamelCase : int
__lowerCamelCase : str
class lowerCAmelCase_ ( __magic_name__ ):
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = {}
_lowerCAmelCase = []
_lowerCAmelCase = 1
_lowerCAmelCase = [1, 2]
_lowerCAmelCase = {"a": 1, "b": 2}
_lowerCAmelCase = {"a": [1, 2], "b": [3, 4]}
_lowerCAmelCase = {"a": {"1": 1}, "b": 2}
_lowerCAmelCase = {"a": 1, "b": 2, "c": 3, "d": 4}
_lowerCAmelCase = {}
_lowerCAmelCase = []
_lowerCAmelCase = 2
_lowerCAmelCase = [2, 3]
_lowerCAmelCase = {"a": 2, "b": 3}
_lowerCAmelCase = {"a": [2, 3], "b": [4, 5]}
_lowerCAmelCase = {"a": {"1": 2}, "b": 3}
_lowerCAmelCase = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
_lowerCAmelCase = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
_lowerCAmelCase = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_lowerCAmelCase = {"a": 2, "b": 0, "c": 2}
_lowerCAmelCase = {
"a": np.eye(2 ).astype(_lowerCAmelCase ),
"b": np.zeros(3 ).astype(_lowerCAmelCase ),
"c": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = {"a": 1, "b": 2}
_lowerCAmelCase = {"a": 3, "b": 4}
_lowerCAmelCase = {"a": 5, "b": 6}
_lowerCAmelCase = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def _snake_case ( self ) -> str:
class lowerCAmelCase_ :
__lowerCamelCase : Tuple = "bar"
_lowerCAmelCase = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(_lowerCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_lowerCAmelCase = {F'''{i}''': i for i in range(SCREAMING_SNAKE_CASE_ )}
_lowerCAmelCase = map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 10 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
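    # map_nested only fans out to worker processes when num_proc exceeds 1 and
    # the iterable has at least parallel_min_length (16) items; otherwise it
    # stays single-process, and the pool size is capped at the iterable length.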
class lowerCAmelCase_ ( __magic_name__ ):
@require_tf
def _snake_case ( self ) -> Optional[Any]:
import tensorflow as tf
from tensorflow.keras import layers
_lowerCAmelCase = layers.Dense(2 )
def gen_random_output():
_lowerCAmelCase = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
_lowerCAmelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
_lowerCAmelCase = gen_random_output()
_lowerCAmelCase = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _snake_case ( self ) -> Any:
import torch
def gen_random_output():
_lowerCAmelCase = torch.nn.Linear(3 , 2 )
_lowerCAmelCase = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
_lowerCAmelCase = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
_lowerCAmelCase = gen_random_output()
_lowerCAmelCase = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _snake_case ( self ) -> Dict:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
_lowerCAmelCase = gen_random_output()
with temp_seed(42 ):
_lowerCAmelCase = gen_random_output()
_lowerCAmelCase = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).flatten()
assert output == expected_output
def __a():
'''simple docstring'''
_lowerCAmelCase = A(x=1 , y="foobar" )
_lowerCAmelCase = {"x": 1, "y": "foobar"}
assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
_lowerCAmelCase = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
_lowerCAmelCase = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
asdict([1, A(x=10 , y="foo" )] )
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
return text.split()
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __a():
'''simple docstring'''
with Pool(2 ) as pool:
_lowerCAmelCase = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(SCREAMING_SNAKE_CASE_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_lowerCAmelCase = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(SCREAMING_SNAKE_CASE_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_lowerCAmelCase = []
for yield_time, content in iflatmap_unordered(
SCREAMING_SNAKE_CASE_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(SCREAMING_SNAKE_CASE_ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(SCREAMING_SNAKE_CASE_ ) == 4
| 18 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
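# The fixtures below materialise small datasets in every on-disk format the
# loaders are tested against (arrow, sqlite, csv, json/jsonl, parquet, text),
# plus bz2/gzip/lz4/7z/tar/xz/zip/zstd-compressed variants of several of them.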
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_a = HUGGINGFACE_HUB_CACHE
_a = """config.json"""
_a = """diffusion_pytorch_model.bin"""
_a = """diffusion_flax_model.msgpack"""
_a = """model.onnx"""
_a = """diffusion_pytorch_model.safetensors"""
_a = """weights.pb"""
_a = """https://huggingface.co"""
_a = default_cache_path
_a = """diffusers_modules"""
_a = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
_a = ["""fp16""", """non-ema"""]
_a = """.self_attn"""
| 19 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "camembert"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0522 , SCREAMING_SNAKE_CASE_ : str=768 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=512 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=0.0_2 , SCREAMING_SNAKE_CASE_ : Tuple=1e-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Dict="absolute" , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class _lowercase ( __lowercase ):
@property
def a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 56 | 0 |
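# ViLT processor: bundles a ViltImageProcessor with a BERT tokenizer so a single
# __call__ returns the text encoding merged with pixel_values/pixel_mask.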
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ (lowercase__ ):
snake_case =['image_processor', 'tokenizer']
snake_case ='ViltImageProcessor'
snake_case =('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_) -> Optional[Any]:
a__ =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
a__ =kwargs.pop('feature_extractor')
a__ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase_ , lowercase_)
a__ =self.image_processor
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
a__ =self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel_values + pixel_mask
a__ =self.image_processor(lowercase_ , return_tensors=lowercase_)
encoding.update(lowercase_)
return encoding
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> int:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.tokenizer.model_input_names
a__ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __UpperCamelCase ( self) -> List[str]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
| 20 |
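# TimeSformer configuration: video-transformer hyperparameters, including the
# number of input frames and the attention layout (default "divided_space_time").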
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "timesformer"
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str]=224 , SCREAMING_SNAKE_CASE_ : List[str]=16 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=8 , SCREAMING_SNAKE_CASE_ : Tuple=768 , SCREAMING_SNAKE_CASE_ : int=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE_ : Any=1e-6 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]="divided_space_time" , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = num_frames
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = qkv_bias
__snake_case = attention_type
__snake_case = drop_path_rate
| 56 | 0 |
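# Binary indexed (Fenwick) tree specialised for range-maximum queries: get_next
# and get_prev use bit tricks to walk between overlapping intervals, update()
# pushes a new value upward, and the range query folds maxima over [left, right).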
class __A :
def __init__( self :Optional[int] , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Optional[Any] =size
__magic_name__ : Union[str, Any] =[0] * size
__magic_name__ : Optional[int] =[0] * size
@staticmethod
def A__ ( __snake_case :int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def A__ ( __snake_case :int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def A__ ( self :Optional[Any] , __snake_case :int , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Optional[int] =value
while index < self.size:
__magic_name__ : List[Any] =self.get_prev(__snake_case ) + 1
if current_left_border == index:
__magic_name__ : str =value
else:
__magic_name__ : Tuple =max(__snake_case , __snake_case , __snake_case )
__magic_name__ : Tuple =self.get_next(__snake_case )
def A__ ( self :List[Any] , __snake_case :int , __snake_case :int ):
'''simple docstring'''
right -= 1 # Because of right is exclusive
__magic_name__ : Optional[Any] =0
while left <= right:
__magic_name__ : int =self.get_prev(__snake_case )
if left <= current_left:
__magic_name__ : str =max(__snake_case , self.tree[right] )
__magic_name__ : int =current_left
else:
__magic_name__ : int =max(__snake_case , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
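# Singly linked list whose swap_nodes exchanges the *data* of two nodes located
# by value, rather than relinking the nodes themselves.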
'''simple docstring'''
from typing import Any
class _lowercase :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
__snake_case = data
__snake_case = None
class _lowercase :
def __init__( self : List[Any] ) -> Tuple:
__snake_case = None
def a ( self : int ) -> Union[str, Any]:
__snake_case = self.head
while temp is not None:
print(temp.data , end=' ' )
__snake_case = temp.next
print()
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
__snake_case = Node(SCREAMING_SNAKE_CASE_ )
__snake_case = self.head
__snake_case = new_node
def a ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
if node_data_a == node_data_a:
return
else:
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
if node_a is None or node_a is None:
return
__snake_case , __snake_case = node_a.data, node_a.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 0 |
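# Classic English letter-frequency analysis: a ciphertext is scored by how many
# of its six most and six least frequent letters agree with ETAOIN...VKJXQZ.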
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_snake_case : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_snake_case : Dict = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_snake_case : Optional[int] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
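# Sort-key helper: extracts the first element of a (frequency, letters) pair.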
def snake_case_ (UpperCamelCase : tuple ):
'''simple docstring'''
return x[0]
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = get_letter_count(UpperCamelCase )
_a = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(UpperCamelCase )
_a = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=UpperCamelCase )
_a = ''''''.join(freq_to_letter[freq] )
_a = list(freq_to_letter_str.items() )
freq_pairs.sort(key=UpperCamelCase , reverse=UpperCamelCase )
_a = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(UpperCamelCase )
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = get_frequency_order(UpperCamelCase )
_a = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
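# Lazy import table for TAPAS: torch and TF model symbols are only materialised
# through _LazyModule once the corresponding backend is confirmed available.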
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 0 |
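# Information Gain Filtration (IGF) pipeline: collect (context, information-gain)
# pairs from a pretrained GPT-2, train a secondary learner on them, then
# fine-tune GPT-2 while the learner filters out low-information contexts.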
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
__lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase_ = load_gpta('gpt2').to(__lowercase)
print('computing perplexity on objective set')
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item()
print('perplexity on objective set:' , __lowercase)
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
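# Train the secondary learner on the previously collected (context, IG) pairs.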
def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase_ = SecondaryLearner(__lowercase)
# Train secondary learner
UpperCamelCase_ = train_secondary_learner(
__lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
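# Fine-tune GPT-2 while the secondary learner vetoes contexts whose predicted
# information gain falls below a (step-dependent) threshold.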
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ):
UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase_ = RandomSampler(__lowercase)
UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase)
UpperCamelCase_ = max_steps // (len(__lowercase)) + 1
UpperCamelCase_ = 0
UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase)
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowercase)
secondary_learner.eval()
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = []
UpperCamelCase_ = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
for epoch in range(int(__lowercase)):
for step, example in enumerate(__lowercase):
torch.cuda.empty_cache()
UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase_ = model(__lowercase , labels=__lowercase)
UpperCamelCase_ = True
if secondary_learner is not None:
UpperCamelCase_ = secondary_learner.forward(
torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item()
observed_qs.append(float(__lowercase))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase_ = -1
if predicted_q < threshold:
UpperCamelCase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase_ = outputs[0]
lm_loss.backward()
examples += 1
            del outputs
            # Once the batch holds enough contexts, clip gradients and take an
            # optimizer/scheduler step (per-example backprop already happened above).
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase)
test_perps.append(__lowercase)
print('Test perplexity, step' , __lowercase , ':' , __lowercase)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowercase)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _snake_case ():
UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=__lowercase , default=__lowercase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=__lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=__lowercase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=__lowercase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase_ = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase_ = training_secondary_learner(
__lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase_ , UpperCamelCase_ = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 23 |
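# AutoencoderKL tests: fast unit checks (gradient-checkpointing parity,
# pretrained loading, deterministic output slices) plus slow integration tests
# against the Stable Diffusion VAE in fp32/fp16 and with xformers attention.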
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
        # run the backwards pass on the model. For simplicity we backprop on a
        # mean difference against random targets rather than a real training loss
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
        # run the backwards pass on the model. For simplicity we backprop on a
        # mean difference against random targets rather than a real training loss
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
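# BLIP-2 checkpoint conversion: rename LAVIS state-dict keys to the HF layout,
# repack the q/v biases into a single qkv bias, then verify logits and a generated
# caption against the original model before optionally pushing to the Hub.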
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
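# Helper: pop a key from the state dict and reinsert its value under the new name.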
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 |
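# Shap-E pipeline tests: a tiny prior/text-encoder/renderer stack for fast CPU
# shape and slice checks, plus a slow GPU test against reference renders from the Hub.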
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
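# bitsandbytes 4-bit (nf4) integration tests: compare the memory footprint and
# generations of a 4-bit-loaded bloom-1b7 against its fp16 counterpart and check
# that quantised linear weights end up packed as uint8 parameters.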
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase__ ( _a):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
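# Minimal LoRA-style adapter: a two-layer bottleneck added residually on top of
# a wrapped base module, initialised so the adapter's contribution starts at zero.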
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , a : nn.Module , a : int ) -> Dict:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : str = module
SCREAMING_SNAKE_CASE : List[Any] = nn.Sequential(
nn.Linear(module.in_features , a , bias=a ) , nn.Linear(a , module.out_features , bias=a ) , )
SCREAMING_SNAKE_CASE : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCamelCase ( self : str , a : Optional[Any] , *a : str , **a : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.module(a , *a , **a ) + self.adapter(a )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    '''simple docstring'''

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        """simple docstring"""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
    def tearDown(self):
        """simple docstring"""
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        """simple docstring"""
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        """simple docstring"""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        """simple docstring"""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        """simple docstring"""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        """simple docstring"""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        """simple docstring"""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_4bit(self):
        """simple docstring"""
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        """simple docstring"""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
    def test_fp32_4bit_conversion(self):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown(self):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        """simple docstring"""
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        """simple docstring"""
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )
    def tearDown(self):
        """simple docstring"""
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()
    def test_correct_head_class(self):
        """simple docstring"""
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        super().setUp()
    def tearDown(self):
        """simple docstring"""
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()
    def test_pipeline(self):
        """simple docstring"""
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        super().setUp()
    def test_multi_gpu_loading(self):
        """simple docstring"""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        self.model_name = "facebook/opt-350m"
        super().setUp()
    def test_training(self):
        """simple docstring"""
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 25 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition(number_to_partition: int) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
"""simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
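    # Hedged sanity check, my addition (not in the original file): per the
    # Project Euler 77 statement, 10 is expressible as a sum of primes in
    # exactly five ways (5+5, 3+7, 2+3+5, 2+2+3+3, 2+2+2+2+2), and `partition`
    # encodes each way as a distinct product of its primes.
    assert len(partition(10)) == 5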
| 56 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
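# Hedged usage sketch, my addition (the argument values and tensor names below
# are illustrative assumptions, not taken from the original file):
#
#   model = DualTransformer2DModel(num_attention_heads=16, attention_head_dim=88)
#   model.mix_ratio = 0.5  # equal weight for the two conditioning streams
#   out = model(hidden_states, encoder_hidden_states, timestep=0, return_dict=False)[0]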
| 26 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """simple docstring"""
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def main():
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 56 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
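# Hedged example, my addition: `attribute_map` exposes the GPT-specific names
# under the generic Hugging Face aliases, e.g.:
#
#   config = OpenAIGPTConfig(n_layer=6)
#   assert config.num_hidden_layers == 6  # resolved through attribute_map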
| 27 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
"""simple docstring"""
    if not isinstance(n, int):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
return list_nums
return []
def solution() -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
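    # Hedged cross-check, my addition: 5777 is the smallest odd composite that
    # cannot be written as a prime plus twice a square (Project Euler 46), so
    # `solution()` should return it.
    assert solution() == 5777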
| 56 | 0 |
'''simple docstring'''
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 28 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
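    # Hedged usage example, my addition (the byte count and partition count are
    # my own illustrative values): split 100 bytes across 4 workers into
    # inclusive byte ranges.
    assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']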
| 56 | 0 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
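    # Hedged worked example, my addition (the figures are my own): $10,000 at a
    # 0.05% daily rate over 10 days accrues $50 of simple interest.
    assert abs(simple_interest(10_000, 0.0005, 10) - 50.0) < 1e-9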
| 29 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 56 | 0 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
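# Hedged usage sketch, my addition (the base `HashTable` constructor and
# `insert_data` method are assumed from the sibling `hash_table` module, which
# is not shown here):
#
#   table = HashTableWithLinkedList(3)
#   table.insert_data(17)  # colliding keys are chained in per-slot deques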
| 30 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['target_spm'])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')

        source_text = 'Tämä on testi'
        target_text = 'This is a test'

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 56 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
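    # Hedged cross-check, my addition: the result should agree with Python's
    # built-in octal formatting for small non-negative inputs.
    assert decimal_to_octal(216) == oct(216)  # both give '0o330'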
| 31 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char chunk to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a 32-bit integer as 8 little-endian hexadecimal bytes."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits, appending its bit length."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2^32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hexadecimal MD5 digest of `message`."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
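# A minimal cross-check sketch (not part of the original module): the routine
# above implements MD5, so its output can be validated against the standard
# library. `md5_me` is an assumed name for the final digest function, since
# every def in this file is obfuscated to `_a`.
#
# import hashlib
# message = b"The quick brown fox jumps over the lazy dog"
# assert hashlib.md5(message).hexdigest() == "9e107d9d372bb6826bd81d3542a419d6"
# assert md5_me(message) == hashlib.md5(message).hexdigest().encode()  # hypothetical name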
| 56 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , mobilebert_config_file : str , pytorch_dump_path : str ) -> None:
    """simple docstring"""
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
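# Illustrative invocation (a sketch; the script name and all paths below are
# placeholders, not taken from the original repository):
#
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin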
| 32 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
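# Minimal usage sketch (assumes a public dataset repo; the URL shape is whatever
# hfh.hf_hub_url produces for repo_type='dataset'):
#
# url = _a('squad', 'README.md')
# # e.g. 'https://huggingface.co/datasets/squad/resolve/main/README.md'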
| 56 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ,snake_case_ ):
'''simple docstring'''
__lowercase : Optional[Any] = 'maskformer-swin'
__lowercase : Tuple = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self:Optional[Any] , _a:int=2_24 , _a:Any=4 , _a:str=3 , _a:Tuple=96 , _a:Any=[2, 2, 6, 2] , _a:Any=[3, 6, 12, 24] , _a:Union[str, Any]=7 , _a:List[Any]=4.0 , _a:Optional[Any]=True , _a:Union[str, Any]=0.0 , _a:int=0.0 , _a:Optional[int]=0.1 , _a:Tuple="gelu" , _a:Union[str, Any]=False , _a:List[Any]=0.02 , _a:List[str]=1e-5 , _a:str=None , _a:Tuple=None , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = embed_dim
snake_case__ = depths
snake_case__ = len(_a )
snake_case__ = num_heads
snake_case__ = window_size
snake_case__ = mlp_ratio
snake_case__ = qkv_bias
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = drop_path_rate
snake_case__ = hidden_act
snake_case__ = use_absolute_embeddings
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case__ = int(embed_dim * 2 ** (len(_a ) - 1) )
snake_case__ = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
snake_case__ , snake_case__ = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
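# Instantiation sketch (commented out; attribute names follow the upstream
# MaskFormerSwinConfig semantics and are an assumption here):
#
# cfg = __magic_name__(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
# # The derived channel dimension after the last stage is 96 * 2**3 == 768,
# # and stage_names should be ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].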
| 33 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
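# Usage sketch for the adapter module above (commented out so it does not run at
# import time; names and shapes are illustrative, not from the original tests):
#
# base = nn.Linear(16, 16)
# wrapped = _lowercase(base, 8)        # wrap with a rank-8 bottleneck adapter
# y = wrapped(torch.randn(2, 16))      # frozen base output + adapter output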
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a cast to float32
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a cast to float16
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = IFPipeline
A_ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase__ ( self) -> Optional[int]:
return self._get_dummy_components()
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Optional[int]:
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def UpperCAmelCase__ ( self) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def UpperCAmelCase__ ( self) -> Optional[int]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def UpperCAmelCase__ ( self) -> List[Any]:
self._test_save_load_local()
def UpperCAmelCase__ ( self) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> int:
# text-to-image: base IF pipeline plus its super-resolution stage
UpperCamelCase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa)
UpperCamelCase = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
UpperCamelCase , UpperCamelCase = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase = None
UpperCamelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase = IFImgaImgPipeline(**pipe_a.components)
UpperCamelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase = IFInpaintingPipeline(**pipe_a.components)
UpperCamelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1)).to(lowerCamelCase_)
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , num_inference_steps=2 , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1)).to(lowerCamelCase_)
UpperCamelCase = pipe_a(
prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , original_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCamelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_)
def __snake_case ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
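# Sketch of how the helper above is meant to bracket a peak-memory measurement
# (everything except the helper name is illustrative):
#
# _start_torch_memory_measurement()
# pipe(...)                                      # run the stage under test
# peak_bytes = torch.cuda.max_memory_allocated()
# assert peak_bytes < 13 * 10**9                 # e.g. the stage-I budget above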
| 34 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# load with additional decoder kwargs and make sure they reach the language model
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
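# Sketch of the offset -> seconds conversion exercised in the slow test above:
# offsets are counted in logit frames, so seconds = offset * inputs_to_logits_ratio
# / sampling_rate. Assuming wav2vec2-base (ratio 320 at 16 kHz) each step is 0.02 s:
#
# time_offset = 320 / 16_000                                # 0.02 s per offset step
# start_time = word_offset['start_offset'] * time_offset    # illustrative access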
| 56 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Union[str, Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Union[str, Any] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
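# Sketch (assumption): with the _LazyModule registration above, submodule imports
# are deferred until first attribute access, so e.g.
#
# from transformers.models import roformer
# cfg_cls = roformer.RoFormerConfig   # the real import happens only here
#
# keeps torch/tf/flax out of memory until a backend-specific symbol is touched.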
| 35 |
'''simple docstring'''
def power(base : int , exponent : int ) -> float:
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
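# Quick sanity sketch for power() above (commented out so importing this file
# stays side-effect free):
#
# assert power(2, 5) == 32
# assert 1 / power(2, 3) == 0.125   # how the negative-exponent branch works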
| 56 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Optional[Any] = '''▁'''
__lowercase : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
__lowercase : Optional[Any] = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_="<s>" ,SCREAMING_SNAKE_CASE_="</s>" ,SCREAMING_SNAKE_CASE_="</s>" ,SCREAMING_SNAKE_CASE_="<s>" ,SCREAMING_SNAKE_CASE_="<unk>" ,SCREAMING_SNAKE_CASE_="<pad>" ,SCREAMING_SNAKE_CASE_="<mask>" ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
snake_case : Tuple = AddedToken(SCREAMING_SNAKE_CASE_ ,lstrip=SCREAMING_SNAKE_CASE_ ,rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) else mask_token
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ ,eos_token=SCREAMING_SNAKE_CASE_ ,unk_token=SCREAMING_SNAKE_CASE_ ,sep_token=SCREAMING_SNAKE_CASE_ ,cls_token=SCREAMING_SNAKE_CASE_ ,pad_token=SCREAMING_SNAKE_CASE_ ,mask_token=SCREAMING_SNAKE_CASE_ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE_ ,)
snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
snake_case : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : str = 1
snake_case : List[Any] = len(self.sp_model ) + self.fairseq_offset
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : List[Any] = None
snake_case : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
snake_case : Optional[Any] = {}
snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case : Any = [self.cls_token_id]
snake_case : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ ,token_ids_a=SCREAMING_SNAKE_CASE_ ,already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ ,out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = """""".join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ ,""" """ ).strip()
return out_string
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ ,"""wb""" ) as fi:
snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
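# Hedged illustration (added; not part of the original file): the offset rule
# from the alignment table above, in isolation. Assuming a loaded model where
# "," sits at spm index 3, adding self.fairseq_offset (= 1) yields fairseq
# index 4, matching the table:
# spm_id = tok.sp_model.PieceToId(",")      # -> 3
# fairseq_id = spm_id + tok.fairseq_offset  # -> 4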
| 36 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, seeded with x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant step: subtract f(x_n1) divided by the slope of the secant line
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example polynomial: x^3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
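# Hedged check (added for illustration): the real root of x^3 - 2x - 5 is
# ~2.0945515, so the call above should print a value within 1e-5 of it. The
# same routine also recovers sqrt(2) from suitable seeds:
# print(intersection(lambda x: x * x - 2, 1.0, 2.0))  # ~1.4142135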
| 56 | 0 |
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
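# Worked example (added for illustration): prefix_function("aabcdaabc") returns
# [0, 1, 0, 0, 0, 1, 2, 3, 4]; the trailing 4 records that "aabc" is both a
# proper prefix and a suffix, so longest_prefix("aabcdaabc") == 4.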
| 37 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
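    # Hedged aside (illustration only, not part of the original test): with the
    # vocab file written in setUp, token ids are simply line indices, so a
    # locally constructed tokenizer should map "我" (line 8) to id 8:
    # tok = CpmAntTokenizer(self.vocab_file)
    # assert tok.convert_tokens_to_ids(["我"]) == [8]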
| 56 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
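# Hedged usage sketch (added for illustration): construct with one override and
# read back a default.
# config = GLPNConfig(decoder_hidden_size=128)
# assert config.depths == [2, 2, 2, 2] and config.decoder_hidden_size == 128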
| 38 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression with a stack."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # emulate division that truncates toward zero for mixed signs
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
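# Worked example (added for illustration):
# evaluate_postfix(["2", "1", "+", "3", "*"]) -> 9, i.e. (2 + 1) * 3.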
| 56 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
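# e.g. (added illustration) _flat_idx_to_idx(5, (2, 3)) == (1, 2): flat index 5
# in a row-major 2x3 grid is row 1, column 2, since 5 = 1 * 3 + 2.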
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
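# Hedged usage sketch (added for illustration; names below are hypothetical):
# run a toy keyword-arg "layer" over a flat batch of 16 in chunks of 4.
# import torch
# inputs = {"x": torch.randn(16, 8)}
# out = chunk_layer(lambda x: x * 2, inputs, chunk_size=4, no_batch_dims=1)
# assert out.shape == (16, 8)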
| 39 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Naive top-down recursion over every cell (exponential without memoization)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a full (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row[:]  # copy so later updates don't alias the saved row

    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
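    # Hedged cross-check (added for illustration): the variants should agree.
    # mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    # assert (largest_square_area_in_matrix_bottom_up(3, 3, mat)
    #         == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat)
    #         == 2)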
| 56 | 0 |