| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
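# Usage sketch (added for illustration; `load_texts` is a hypothetical consumer
# of the aliases above, not part of the original module):
def load_texts(paths: NestedDataStructureLike[PathLike]) -> None:
    """Accept one path, a list of paths, or a dict of named paths."""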
---
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Explore every cell recursively; track the largest square of 1s seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
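# Example (added): for mat = [[1, 1], [1, 1]] the cells right of, below, and diagonal
# to (0, 0) each yield a side-1 square, so sub_problem_sol = 1 + min(1, 1, 1) = 2.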
def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized in dp_array to avoid recomputing subproblems."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table, scanned bottom-right to top-left."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP with O(cols) memory: only the current and next rows are kept."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so the next pass still sees the previous row

    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
---
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute per-class intersection/union areas for one prediction-label pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
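# Worked example (added): pred_label = [0, 1, 1], label = [0, 1, 2], num_labels = 3.
# intersect = [0, 1], so area_intersect = [1, 1, 0], area_pred_label = [1, 2, 0],
# area_label = [1, 1, 1], area_union = [1, 2, 1] -> per-class IoU [1.0, 0.5, 0.0].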
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection/union areas over a whole batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU plus the accuracy metrics described in the docstring above."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
---
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
---
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __lowerCamelCase  # rebind the literal above under a readable name

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
---
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square even-sided matrix into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
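# Added commentary: t1..t7 are Strassen's seven block products; e.g. with
# A = [[a, b], [c, d]] and B = [[e, f], [g, h]], t5 = (a + d)(e + h) is the
# classic M1 term. Seven recursive multiplications instead of eight give the
# O(n^log2(7)) ~ O(n^2.807) running time.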
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # pad copies rather than the inputs themselves, so callers' matrices stay untouched
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
---
from __future__ import annotations
import math
__lowerCamelCase : Tuple = """2020.9.26"""
__lowerCamelCase : Tuple = """xcodz-dot, cclaus, dhruvmanila"""
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if not all(isinstance(snake_case_ , (float, int) ) for val in locals().values() ):
snake_case__ : Optional[int] = F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(snake_case_ )
snake_case__ : Optional[int] = ((x * distance) / (z + distance)) * scale
snake_case__ : Any = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
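# Example (added): convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) computes
# (1 * 10 / (3 + 10) * 10, 2 * 10 / (3 + 10) * 10) ~ (7.6923, 15.3846).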
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by `angle`."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
---
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
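# Usage sketch (added; `model` stands in for any torch.nn.Module):
#   device = get_device()
#   freeze_module(model)
#   print(f"[{get_timestamp()}] running on {device}")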
---
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
---
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
---
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place: insertion sort with binary search for the insert position."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary-search the slot for `val` among the already-sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to open the slot, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
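# Example (added): binary_insertion_sort([5, 2, 4, 1]) -> [1, 2, 4, 5].
# The binary search trims comparisons to O(log i) per element, but the shifts
# keep the overall worst case at O(n^2).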
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
---
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
---
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
snake_case__ : Union[str, Any] = "upernet"
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=5_1_2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : List[Any]=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=0.4 , __lowerCAmelCase : Union[str, Any]=3_8_4 , __lowerCAmelCase : Optional[int]=2_5_6 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=2_5_5 , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_lowerCamelCase : Union[str, Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Any = backbone_config.get('''model_type''' )
_lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : int = config_class.from_dict(__lowerCAmelCase )
_lowerCamelCase : List[Any] = backbone_config
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[Any] = pool_scales
_lowerCamelCase : List[Any] = use_auxiliary_head
_lowerCamelCase : Dict = auxiliary_loss_weight
_lowerCamelCase : str = auxiliary_in_channels
_lowerCamelCase : Optional[int] = auxiliary_channels
_lowerCamelCase : Optional[int] = auxiliary_num_convs
_lowerCamelCase : str = auxiliary_concat_input
_lowerCamelCase : Dict = loss_ignore_index
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Union[str, Any] = self.backbone_config.to_dict()
_lowerCamelCase : List[Any] = self.__class__.model_type
return output
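# Minimal usage sketch (added; assumes a transformers build that ships UperNet):
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#   config = UperNetConfig()  # defaults to a ResNet backbone
#   model = UperNetForSemanticSegmentation(config)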
---
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
---
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Validate the bounds and return min_val when option is True, else max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find to_guess inside [lower, higher] by repeatedly probing the midpoint."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Report whether a probe is above, below, or equal to the target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Read bounds and the target from stdin, then run the game."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
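# Added note: the _LazyModule swap above defers the real imports, so e.g.
# `from transformers.models.nllb import NllbTokenizerFast` only loads
# tokenization_nllb_fast on first attribute access.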
---
"""
Totient sum: count the reduced proper fractions n/d with d <= limit, which
equals the sum of Euler's totient phi(d) for 2 <= d <= limit.
"""


def solution(limit: int = 1_000_000) -> int:
    # phi[i] starts at i - 1; for a prime p this is already phi(p)
    phi = [i - 1 for i in range(limit + 1)]

    # sieve: when phi[i] == i - 1, i is prime; update all of its multiples
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
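# Sanity check (added): for limit = 8 there are 21 reduced proper fractions
# (phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4), and solution(8) == 21.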
if __name__ == "__main__":
print(solution())
---
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from the angle of its FFT."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
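# Usage sketch (added; `IdentityFilter` is a made-up example, not part of the
# module): any object with a process(sample) -> float method satisfies FilterType.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample  # all-pass: flat 0 dB gain, zero phase shift

# show_frequency_response(IdentityFilter(), 48000)
# show_phase_response(IdentityFilter(), 48000)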
---
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 355 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 139 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
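
# Example invocation (the script name and paths below are illustrative, not verified):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf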
| 98 |
def solution(limit=1000000):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
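
# For the default limit of one million, the starting number with the longest Collatz
# chain is 837799 (the well-known answer to Project Euler problem 14).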
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock_1 = FileLock(str(tmpdir / "foo.lock"))
    lock_2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_1 = FileLock(str(tmpdir / filename))
    assert lock_1._lock_file.endswith(".lock")
    assert not lock_1._lock_file.endswith(filename)
    assert len(os.path.basename(lock_1._lock_file)) <= 255
    lock_2 = FileLock(tmpdir / filename)
    with lock_1.acquire():
        with pytest.raises(Timeout):
            lock_2.acquire(0)
| 106 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
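
# Each key in _import_structure names a submodule; _LazyModule (set up at the bottom of
# this file) imports that submodule only when one of the listed public names is first
# accessed, which keeps the initial package import cheap.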
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 106 | 1 |
from __future__ import annotations
import math
def is_prime(number) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # no prime + twice-a-square decomposition was found
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
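
# Goldbach's other conjecture first fails at 5777, the smallest odd composite that
# cannot be written as a prime plus twice a square (Project Euler problem 46).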
if __name__ == "__main__":
print(F"""{solution() = }""")
| 121 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T):
        if x not in self.key_reference:
            # a miss: evict the least recently used key if the cache is full
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # a hit: pull the key out so it can be re-inserted at the front
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self):
        for k in self.dq_store:
            print(k)

    def __repr__(self):
        return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
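
# Design note: the deque keeps keys in recency order (most recent at the left) while
# the set gives O(1) membership tests; deque.remove() on a hit is still O(n), so this
# implementation trades a little speed for simplicity.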
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 121 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
A : int = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
A : Tuple = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
A : Optional[Any] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
A : List[str] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Value('string'),
}) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
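
# Sanity check of the estimator, computed by hand from the product formula above:
# with 10 samples and 2 passing, pass@1 is 1 - (8/9) * (9/10) = 0.2, so
# estimate_pass_at_k(np.array([10]), np.array([2]), 1) returns array([0.2]).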
| 352 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
A : Union[str, Any] = torch.device('''cpu''')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
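
# Tracing the rules above on a hypothetical key: "network.0.0.dwconv.weight" first
# becomes "network.0.0.depth_wise_conv.weight" and is then remapped to
# "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight".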
@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str ,lowerCamelCase : List[str] ):
_A : Dict = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_A : Any = 1000
_A : int = 'huggingface/label-files'
_A : List[Any] = 'imagenet-1k-id2label.json'
_A : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase ,lowerCamelCase ,repo_type='dataset' ) ,'r' ) )
_A : Dict = {int(lowerCamelCase ): v for k, v in idalabel.items()}
_A : Optional[int] = idalabel
_A : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_A : Optional[Any] = [3, 3, 6, 4]
_A : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_A : List[Any] = [3, 3, 9, 6]
_A : Tuple = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_A : int = [4, 3, 10, 5]
_A : int = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_A : Optional[Any] = [4, 4, 12, 6]
_A : Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
_A : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase ,map_location='cpu' ,check_hash=lowerCamelCase )
else:
_A : Union[str, Any] = torch.load(lowerCamelCase ,map_location='cpu' )
_A : Union[str, Any] = checkpoint
_A : List[str] = create_rename_keys(lowerCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# load HuggingFace model
_A : str = SwiftFormerForImageClassification(lowerCamelCase ).eval()
hf_model.load_state_dict(lowerCamelCase )
# prepare test inputs
_A : Any = prepare_img()
_A : Optional[int] = ViTImageProcessor.from_pretrained('preprocessor_config' )
_A : Any = processor(images=lowerCamelCase ,return_tensors='pt' )
# compare outputs from both models
_A : int = get_expected_output(lowerCamelCase )
_A : Optional[int] = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] ,lowerCamelCase ,atol=1E-3 )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 227 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        """
        Performs the circular convolution of the first and second signal using matrix method.

        >>> convolution = CircularConvolution()
        >>> convolution.circular_convolution()
        [10.0, 10.0, 6.0, 14.0]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 98 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
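
# PNDM combines pseudo Runge-Kutta (PRK) warm-up steps with pseudo linear multi-step
# (PLMS) updates; the tests below exercise both step_prk() and step_plms().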
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 98 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
__lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # force a fixed-length padding strategy so every candidate has the same shape
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
def __lowercase ( self : str ,_a : Any ,_a : Tuple=None ):
'''simple docstring'''
_a : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : Tuple = [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase__ ,name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
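
# A minimal usage sketch for batch_encode_candidates above, following the
# upstream transformers docstring; kept as comments since it needs the
# published checkpoint and network access:
#
#   from transformers import RealmTokenizerFast
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   encoded = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="pt")
#   encoded["input_ids"].shape  # (batch_size, num_candidates, max_length)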
| 360 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __b : list[int] ):
    """simple docstring"""
    if not len(__a ) == len(__b ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if __a[0] == __a[1] == __b[0] == __b[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients of a1*x + b1*y = c1 and a2*x + b2*y = c2
    aa , ba , ca = __a
    ab , bb , cb = __b
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
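
# Quick sanity check: the system x + 2y = 3, 2x + y = 3 has determinant -3
# and the unique solution (1.0, 1.0).
assert UpperCAmelCase_([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)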
| 5 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    '''simple docstring'''
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main( ):
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('''Sorted List\n''' )
    print(*arr )
if __name__ == "__main__":
main()
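
# For comparison, a minimal single-process sketch of the same odd-even
# transposition idea (the helper name is hypothetical, not part of the
# original module); it computes the same result without Pipe/Process.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))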
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
'''simple docstring'''
from math import factorial
def solution( _A = 100 ):
    return sum(map(int , str(factorial(_A ) ) ) )
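
# Worked check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27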
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 | 0 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __magic_name__ ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 89 |
'''simple docstring'''
def triangle_number_generator( ):
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2


def count_divisors( n ) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution( ) -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
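
# Quick sanity check for the divisor counter: 28 = 2**2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6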
if __name__ == "__main__":
print(solution())
| 89 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
TASK_GUIDES_PATH = 'docs/source/en/tasks'


def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"


def check_model_list_for_task( task_guide , overwrite=False ):
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_PATH , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_PATH , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 209 |
"""simple docstring"""
def is_palindrome( head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
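
# The checks above assume a singly linked list node exposing `val` and `next`;
# a minimal node class for experimentation (the name ListNode is an
# assumption, since no node type is defined in this file):
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node

# build 1 -> 2 -> 2 -> 1 and confirm the stack- and dict-based checks agree
head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
assert is_palindrome_stack(head) and is_palindrome_dict(head)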
| 209 | 1 |
from __future__ import annotations
def __lowercase ( nums ) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
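
# Quick check: for [1, 2, 4, 5] the best non-adjacent picks are 2 and 5,
# so the maximum non-adjacent sum is 7.
assert __lowercase([1, 2, 4, 5]) == 7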
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__a = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 337 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Optional[Any]=30 , _lowerCAmelCase : List[Any]=400 , _lowerCAmelCase : int=True , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[str]=[0.5, 0.5, 0.5] , _lowerCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , _lowerCAmelCase : int=True , _lowerCAmelCase : Union[str, Any]=1 / 255 , _lowerCAmelCase : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_pad
def lowerCAmelCase_ ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str]=False ):
if not batched:
SCREAMING_SNAKE_CASE_ = image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_ = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE_ = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE_ = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE_ = self.size['shortest_edge']
SCREAMING_SNAKE_CASE_ = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE_ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE_ = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
SCREAMING_SNAKE_CASE_ = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_rescale' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image and target
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCAmelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCAmelCase ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) )
@slow
def lowerCAmelCase_ ( self : str ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ = DetaImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCAmelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) )
# verify masks
SCREAMING_SNAKE_CASE_ = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowerCAmelCase )
# verify orig_size
SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCAmelCase ) )
# verify size
SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) )
| 210 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand( parser : ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run( self ):
        raise NotImplementedError()
| 210 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__( self , other ):
        return self.indices == other.indices

    def __getitem__( self , idx ):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ):
        return len(self.symbols)

    def __contains__( self , sym ):
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol( self , word , n=1 , overwrite=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta( self , lines ):
        return 0

    def add_from_file( self , f ):
        if isinstance(f , str):
            try:
                with open(f , '''r''' , encoding='''utf-8''') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(''' ''' , 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(''' ''' , 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word))
                self.add_symbol(word , n=count , overwrite=overwrite)
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')
def rewrite_dict_keys( d ):
    # (1) strip the BPE continuation marker, (2) add a word-ending marker where the word is not broken up,
    # e.g.: {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(R'''@@$''' , '''''' , k ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , k ), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
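
# Worked example: continuation markers are dropped, word-final tokens gain
# '</w>', and the four special tokens come through unchanged.
assert rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}) == {
    "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7
}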
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='''cpu''' )
    args = chkpt['''cfg''']['''model''']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''' )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''' )
    model_conf = {
        '''activation_dropout''': args['''activation_dropout'''],
        '''architectures''': ['''BioGptForCausalLM'''],
        '''attention_probs_dropout_prob''': args['''attention_dropout'''],
        '''bos_token_id''': 0,
        '''eos_token_id''': 2,
        '''hidden_act''': args['''activation_fn'''],
        '''hidden_dropout_prob''': args['''dropout'''],
        '''hidden_size''': args['''decoder_embed_dim'''],
        '''initializer_range''': 0.0_2,
        '''intermediate_size''': args['''decoder_ffn_embed_dim'''],
        '''layer_norm_eps''': 1e-1_2,
        '''layerdrop''': args['''decoder_layerdrop'''],
        '''max_position_embeddings''': args['''max_target_positions'''],
        '''model_type''': '''biogpt''',
        '''num_attention_heads''': args['''decoder_attention_heads'''],
        '''num_hidden_layers''': args['''decoder_layers'''],
        '''pad_token_id''': 1,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
        '''vocab_size''': src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 1024,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }
    print(f"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt['''model''']
    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight''' ):
            model_state_dict['''output_projection.weight'''] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace('''decoder''' , '''biogpt''' )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print('''Conversion is done!''' )
print('''Conversion is done!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 103 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    """simple docstring"""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config( model_name , eos_token_id ):
    """simple docstring"""
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
a = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
a = tokenizer("\n" , add_special_tokens=A ).input_ids[0]
a , a = get_blipa_config(A , eos_token_id=A )
a = BlipaForConditionalGeneration(A ).eval()
a = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
a , a = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
a = "cuda" if torch.cuda.is_available() else "cpu"
a , a , a = load_model_and_preprocess(
name=A , model_type=A , is_eval=A , device=A )
original_model.eval()
print("Done!" )
# update state dict keys
a = original_model.state_dict()
a = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a = state_dict.pop(A )
if key.startswith("Qformer.bert" ):
a = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
a = key.replace("self" , "attention" )
if "opt_proj" in key:
a = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
a = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
a = key.replace("opt" , "language" )
if key.startswith("t5" ):
a = key.replace("t5" , "language" )
a = val
# read in qv biases
read_in_q_v_bias(A , A )
a , a = hf_model.load_state_dict(A , strict=A )
assert len(A ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
a = load_demo_image()
a = vis_processors["eval"](A ).unsqueeze(0 ).to(A )
a = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(A )
# create processor
a = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=A , image_std=A )
a = BlipaProcessor(image_processor=A , tokenizer=A )
a = processor(images=A , return_tensors="pt" ).pixel_values.to(A )
# make sure processor creates exact same pixel values
assert torch.allclose(A , A )
original_model.to(A )
hf_model.to(A )
with torch.no_grad():
if "opt" in model_name:
a = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
a = hf_model(A , A ).logits
else:
a = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
a = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
a = hf_model(A , A , labels=A ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
a = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=A )
assert torch.allclose(logits[0, :3, :3] , A , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=A )
else:
# cast to same type
a = logits.dtype
assert torch.allclose(original_logits.to(A ) , A , atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
a = ""
a = tokenizer(A , return_tensors="pt" ).input_ids.to(A )
a = original_model.generate({"image": original_pixel_values} )
a = hf_model.generate(
A , A , do_sample=A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , A )
a = input_ids.shape[1]
a = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=A )
a = [text.strip() for text in output_text]
print("HF generation:" , A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A )
hf_model.save_pretrained(A )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 227 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class lowercase_ ( PretrainedConfig ):
    model_type = "open-llama"

    def __init__(
        self ,
        vocab_size=100000 ,
        hidden_size=4096 ,
        intermediate_size=11008 ,
        num_hidden_layers=32 ,
        num_attention_heads=32 ,
        hidden_act="silu" ,
        max_position_embeddings=2048 ,
        initializer_range=0.02 ,
        rms_norm_eps=1e-6 ,
        use_cache=True ,
        pad_token_id=0 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        tie_word_embeddings=False ,
        use_memory_efficient_attention=True ,
        hidden_dropout_prob=0.1 ,
        attention_dropout_prob=0.1 ,
        use_stable_embedding=True ,
        shared_input_output_embedding=True ,
        rope_scaling=None ,
        **kwargs ,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
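
# A minimal usage sketch for the config class above (named OpenLlamaConfig
# upstream); kept as comments since it assumes transformers is installed:
#
#   cfg = lowercase_(num_hidden_layers=2, rope_scaling={"type": "linear", "factor": 2.0})
#   print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}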
| 278 |
from __future__ import annotations
def kmp( pattern , text ) -> bool:
    """simple docstring"""
    # 1) Construct the failure array
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0 # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array( pattern ) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
__A = '''abc1abc12'''
__A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__A = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__A = '''ABABX'''
__A = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__A = '''AAAB'''
__A = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__A = '''abcdabcy'''
__A = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__A = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
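    # kmp runs in O(len(text) + len(pattern)): i never moves backwards and j
    # only falls back through the precomputed failure array. One more spot check:
    assert kmp("abc", "xxabcxx") and not kmp("abd", "xxabcxx")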
| 278 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301 |
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [[0 for _ in range(_lowerCAmelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCAmelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
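

# Quick sanity sketch (our addition): p(5) = 7, the partitions of 5 being
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
assert partition(5) == 7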
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 301 | 1 |
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Max-pool `arr` with a `size` x `size` window and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Average-pool `arr` with a `size` x `size` window and the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix (truncated to int)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
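

# Minimal numeric sketch (our addition): on a 4x4 matrix, a 2x2 window with stride 2
# pools each non-overlapping block down to one value.
_demo = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert maxpooling(_demo, size=2, stride=2).tolist() == [[6.0, 8.0], [14.0, 16.0]]
assert avgpooling(_demo, size=2, stride=2).tolist() == [[3.0, 5.0], [11.0, 13.0]]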
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 19 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count n is "perfect" here when (1 + sqrt(4 * n + 1)) / 2 is a power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the first partition count at which the proportion of perfect
    partitions drops below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
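

# Small sketch of the helper (our addition): n = (s**2 - 1) / 4 passes the check
# exactly when (1 + s) / 2 is a power of two, e.g. n = 2 (s = 3) and n = 12 (s = 7),
# while n = 3 does not.
assert check_partition_perfect(2) and check_partition_perfect(12)
assert not check_partition_perfect(3)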
if __name__ == "__main__":
print(F"{solution() = }")
| 19 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
    image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 127 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
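

# Note (our addition): `_setup_devices` is a cached_property, so the CUDA/TPU probe runs
# once on the first access of `.device` or `.n_gpu` and the result is reused afterwards.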
| 127 | 1 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
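

# Usage sketch (our addition, mirroring the integration test above):
#     pipe = KarrasVePipeline(
#         unet=UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256"),
#         scheduler=KarrasVeScheduler(),
#     )
#     image = pipe(num_inference_steps=20).images[0]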
| 354 |
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Softmax with a tiny additive offset on the logits for numerical stability."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `tf.Tensor`s into 2-dimensional ones."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
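

# Minimal sketch (our addition): shape_list mixes static and dynamic dimensions, and
# flatten mimics torch.flatten.
#     x = tf.zeros((2, 3, 4))
#     shape_list(x)            # -> [2, 3, 4]
#     flatten(x, start_dim=1)  # -> tensor of shape (2, 12)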
| 147 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f'{test_file} instead.' )
_UpperCAmelCase = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
_UpperCAmelCase = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
_UpperCAmelCase = '''.'''.join(_SCREAMING_SNAKE_CASE )
return test_module_path
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = get_module_path(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = importlib.import_module(_SCREAMING_SNAKE_CASE )
return test_module
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , '''all_model_classes''' , [] )
if len(_SCREAMING_SNAKE_CASE ) > 0:
test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = test_class()
if hasattr(_SCREAMING_SNAKE_CASE , '''setUp''' ):
test.setUp()
_UpperCAmelCase = None
if hasattr(_SCREAMING_SNAKE_CASE , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCAmelCase = test.model_tester.__class__
return model_tester
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = []
for test_class in test_classes:
_UpperCAmelCase = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
if tester_class is not None:
tester_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
return test_tester_mapping
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_model_classes(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_test_mapping
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = get_model_classes(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
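

# Illustrative sketch (our addition; the file path is only an example):
#     get_module_path("tests/models/bert/test_modeling_bert.py")
#     # -> "tests.models.bert.test_modeling_bert"
#     get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#     # -> {BertModel: [BertModelTest], ...}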
| 260 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
_UpperCAmelCase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )]
# Reverse whole list
_UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
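

# Quick sanity sketch (our addition): two flips per pass.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]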
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 260 | 1 |
import math
import unittest


def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
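

# Sketch of the 6k +/- 1 fact used above (our addition): every integer is 6k + r with
# r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 3, 4} gives a multiple of 2 or 3, so any prime
# above 3 must have r in {1, 5}, which is why the loop only probes i and i + 2.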
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 101 |
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21,
            11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0,
            258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217,
            230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117,
            86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
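

# Usage sketch (our addition): outside the test harness, the slow tokenizer can be
# loaded directly, e.g.
#     tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287]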
| 101 | 1 |
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")

        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)

        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)

        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)

        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids

        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretokenized_inputs(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # `convert_tokens_to_string` returns a dict for this tokenizer rather than a plain string.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
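

# Usage sketch (our addition; requires the `phonemizer` package with an espeak backend):
#     tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#     tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
#     # -> "h ə l oʊ h aʊ ɑːɹ j uː"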
| 274 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
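

# Usage sketch (our addition; the program string is illustrative):
#     out = check_correctness(
#         "def add(a, b):\n    return a + b\nassert add(1, 2) == 3",
#         timeout=3.0, task_id="demo/0", completion_id=0,
#     )
#     out["passed"]  # -> True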
| 274 | 1 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a :int = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    """simple docstring"""
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        """simple docstring"""
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
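# Hedged usage note (editor addition): the IGNORE_RESULT flag registered above
# lets a doctest accept any output, e.g. inside a docstring:
#
#     >>> torch.cuda.device_count()  # doctest: +IGNORE_RESULT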
| 356 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
'''simple docstring'''
    def get_tokenizer( self , mname ):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        """simple docstring"""
        mname = f'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
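    # Editor note: these BLEU checks are decorated with @slow, so they only run
    # when slow tests are enabled in the transformers test suite (RUN_SLOW=1),
    # e.g. RUN_SLOW=1 pytest <path to this file> -k "en_ru".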
| 56 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Dict = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "mgp-str"
def __init__( self , UpperCamelCase=[32, 128] , UpperCamelCase=4 , UpperCamelCase=3 , UpperCamelCase=27 , UpperCamelCase=38 , UpperCamelCase=5_0257 , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=4.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=1e-5 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=False , UpperCamelCase=0.02 , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = max_token_length
lowerCamelCase_ = num_character_labels
lowerCamelCase_ = num_bpe_labels
lowerCamelCase_ = num_wordpiece_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = distilled
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = drop_rate
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = attn_drop_rate
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = output_aa_attentions
lowerCamelCase_ = initializer_range
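# Hedged sketch (editor addition): in un-obfuscated transformers this config is
# MgpstrConfig (model_type "mgp-str"); a typical construction overrides a field
# and keeps the other defaults, e.g.
#   config = MgpstrConfig(image_size=[32, 128], max_token_length=27)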
| 55 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    """simple docstring"""
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    """simple docstring"""
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    """simple docstring"""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """simple docstring"""
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
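# Worked check (editor addition): solve() on the small system
#   2x + y = 5, x + 3y = 10  has the solution x = 1, y = 3.
assert solve([[2, 1], [1, 3]] , [[5], [10]] ) == [[1.0], [3.0]]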
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""LayoutLMv2FeatureExtractor"""]
lowerCamelCase = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 367 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
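# Hedged usage sketch (editor addition): pairing the processor with pretrained
# components; "openai/clip-vit-base-patch32" is the usual public checkpoint.
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs holds input_ids/attention_mask from the tokenizer plus pixel_values.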
| 48 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args , unknown = parser.parse_known_args()
if args.host != "localhost":
    if args.instance != "V100:1" or args.provider != "cheapest":
        raise ValueError("Cannot specify both BYO and on-demand cluster args")
    cluster = rh.cluster(
        name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
    )
else:
    cluster = rh.cluster(
        name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
    )
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 151 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def parse_args_unused():  # placeholder removed; see parse_args above
    pass
def image_grid(imgs , rows , cols ):
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w , h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images(pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid , images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
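# Worked check (editor addition): image_grid() places len(imgs) == rows * cols
# images on one canvas; generate_images() with images_num=4 computes
# _rows = int(sqrt(4)) = 2 and cols = 4 // 2 = 2, i.e. a 2x2 grid.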
| 151 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : int = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
_a : Tuple= 'vit_mae'
def __init__( self ,snake_case=768 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.0 ,snake_case=0.0 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=224 ,snake_case=16 ,snake_case=3 ,snake_case=True ,snake_case=16 ,snake_case=512 ,snake_case=8 ,snake_case=2048 ,snake_case=0.75 ,snake_case=False ,**snake_case ,):
'''simple docstring'''
super().__init__(**_a )
lowercase : List[Any] = hidden_size
lowercase : Optional[Any] = num_hidden_layers
lowercase : Any = num_attention_heads
lowercase : Tuple = intermediate_size
lowercase : List[str] = hidden_act
lowercase : Union[str, Any] = hidden_dropout_prob
lowercase : int = attention_probs_dropout_prob
lowercase : List[str] = initializer_range
lowercase : Dict = layer_norm_eps
lowercase : Union[str, Any] = image_size
lowercase : Any = patch_size
lowercase : int = num_channels
lowercase : Optional[Any] = qkv_bias
lowercase : Optional[Any] = decoder_num_attention_heads
lowercase : Dict = decoder_hidden_size
lowercase : Dict = decoder_num_hidden_layers
lowercase : Tuple = decoder_intermediate_size
lowercase : Tuple = mask_ratio
lowercase : int = norm_pix_loss
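# Worked check (editor addition): with the defaults above (image_size=224,
# patch_size=16, mask_ratio=0.75) a ViT-MAE encoder sees (224 // 16) ** 2 = 196
# patches, of which roughly 196 * 0.75 = 147 are masked during pre-training.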
| 361 |
def mf_knapsack(i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack(w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            f"But got {num_items} weights and {len(val )} values"
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                f"type {type(wt[i] )} at index {i}"
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 285 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
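# Editor note: _LazyModule defers the heavy torch-backed imports, so accessing
# e.g. AltCLIPModel resolves the modeling submodule only on first attribute
# access rather than at package import time.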
| 278 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe0_00
SEP = 0xe0_01
BOS = 0xe0_02
MASK = 0xe0_03
RESERVED = 0xe0_04
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, bos_token=chr(CLS ), eos_token=chr(SEP ), sep_token=chr(SEP ), cls_token=chr(CLS ), pad_token=chr(PAD ), mask_token=chr(MASK ), add_prefix_space=False, model_max_length=2048, **kwargs, ):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size( self ):
        """simple docstring"""
        return self._unicode_vocab_size
    def _tokenize( self , text ):
        """simple docstring"""
        return list(text )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        return ()
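# Hedged usage sketch (editor addition): CANINE tokenizes at the raw codepoint
# level, so token ids are just ord() values wrapped in [CLS]/[SEP]:
#   tok = CanineTokenizer()
#   tok.build_inputs_with_special_tokens([ord(c) for c in "hi"])
#   # -> [0xE000, 104, 105, 0xE001]  (CLS, 'h', 'i', SEP)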
| 278 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[Any] = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''xlm'''
__SCREAMING_SNAKE_CASE = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , lowercase=3_0_1_4_5 , lowercase=2_0_4_8 , lowercase=1_2 , lowercase=1_6 , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=1 , lowercase=True , lowercase=5_1_2 , lowercase=2_0_4_8**-0.5 , lowercase=1E-12 , lowercase=0.02 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=3 , lowercase=5 , lowercase=True , lowercase="first" , lowercase=True , lowercase=None , lowercase=True , lowercase=0.1 , lowercase=5 , lowercase=5 , lowercase=0 , lowercase=0 , lowercase=2 , lowercase=0 , **lowercase , ) -> Tuple:
__UpperCamelCase = vocab_size
__UpperCamelCase = emb_dim
__UpperCamelCase = n_layers
__UpperCamelCase = n_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = gelu_activation
__UpperCamelCase = sinusoidal_embeddings
__UpperCamelCase = causal
__UpperCamelCase = asm
__UpperCamelCase = n_langs
__UpperCamelCase = use_lang_emb
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = bos_index
__UpperCamelCase = eos_index
__UpperCamelCase = pad_index
__UpperCamelCase = unk_index
__UpperCamelCase = mask_index
__UpperCamelCase = is_encoder
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = embed_init_std
__UpperCamelCase = init_std
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_proj_to_labels
__UpperCamelCase = summary_first_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = mask_token_id
__UpperCamelCase = lang_id
if "n_words" in kwargs:
__UpperCamelCase = kwargs["""n_words"""]
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , **lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
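# Editor note: the ONNX config above declares the dynamic axes per input; for a
# multiple-choice task each tensor is (batch, choice, sequence), otherwise
# (batch, sequence), applied to input_ids, attention_mask and token_type_ids.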
| 370 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 243 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func , a , precision = 10**-10 ):
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. Euler's number e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
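    # Worked check (editor addition): one Newton step for f(x) = x**2 - 5*x + 2
    # from x0 = 0.4 is x1 = 0.4 - f(0.4)/f'(0.4) = 0.4 - 0.16 / (-4.2) ~= 0.4381,
    # and the iteration converges to the smaller root (5 - sqrt(17)) / 2 ~= 0.438447.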
| 210 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn ):
    """simple docstring"""
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
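# Hedged usage sketch (editor addition): decorating a callable makes every call
# emit a warning first, e.g.
#   @experimental
#   def beta_feature():
#       return 42
#   beta_feature()  # warns: 'beta_feature' is experimental ...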
| 210 | 1 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def SCREAMING_SNAKE_CASE__ ( __A = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def SCREAMING_SNAKE_CASE__ ( __A = "" ) -> bool:
if len(__A ) == 0:
return True
_snake_case = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_snake_case = {}
for character in lower_case_input_str:
_snake_case = character_freq_dict.get(__A , 0 ) + 1
_snake_case = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def SCREAMING_SNAKE_CASE__ ( __A = "" ) -> None:
print('\nFor string = ' , __A , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(__A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(__A ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
lowercase : List[str] = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
lowercase : Union[str, Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
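    # Worked check (editor addition): "Momo" lowercases to "momo" with counts
    # {'m': 2, 'o': 2}; zero odd counts means it can be rearranged into a
    # palindrome (e.g. "ommo"), while "abc" has three odd counts and cannot.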
| 160 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
    @require_multi_gpu
    def test_multi_gpu( self ):
        """simple docstring"""
        print(F'Found {torch.cuda.device_count()} devices.' )
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        """simple docstring"""
        print(F'Found {torch.cuda.device_count()} devices.' )
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(F'Command: {cmd}' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes( self ):
        """simple docstring"""
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_loop( self ):
        """simple docstring"""
        print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
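    # Editor note: pad_across_processes pads every rank's tensor with zeros to
    # the longest dim-0 length across ranks; here rank i holds shape (i + 2, 10),
    # so all come back as (num_processes + 1, 10). pad_first=True pads on the
    # left instead, which is why rank i checks tensor2[index:] with
    # index = num_processes - i - 1.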
| 160 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Optional[Any] ) -> Optional[Any]:
# test for the above condition
self.test()
def __magic_name__ ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
SCREAMING_SNAKE_CASE__ : Tuple =False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE__ : List[Any] =self.advance()
if not self.does_advance(__lowercase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.update(__lowercase )
counter += 1
if counter > 1_00_00:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __magic_name__ ( self : str ) -> Union[str, Any]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def __magic_name__ ( self : int , __lowercase : int ) -> Optional[int]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def __magic_name__ ( self : Optional[int] , __lowercase : int ) -> Dict:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def __magic_name__ ( self : Any ) -> Optional[Any]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def __magic_name__ ( self : Any , __lowercase : Optional[Any]=False ) -> List[str]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Tuple , __lowercase : List[int] ) -> Optional[Any]:
super(__lowercase , self ).__init__()
if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
SCREAMING_SNAKE_CASE__ : Dict =token_ids
SCREAMING_SNAKE_CASE__ : Any =len(self.token_ids )
SCREAMING_SNAKE_CASE__ : int =-1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE__ : Optional[int] =False
def __magic_name__ ( self : Dict ) -> Dict:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __magic_name__ ( self : Dict , __lowercase : int ) -> List[Any]:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __magic_name__ ( self : Union[str, Any] , __lowercase : int ) -> int:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(__lowercase )}" )
SCREAMING_SNAKE_CASE__ : Any =False
SCREAMING_SNAKE_CASE__ : int =False
SCREAMING_SNAKE_CASE__ : str =False
if self.does_advance(__lowercase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE__ : Tuple =True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE__ : List[str] =True
SCREAMING_SNAKE_CASE__ : Dict =completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE__ : List[str] =True
self.reset()
return stepped, completed, reset
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =False
SCREAMING_SNAKE_CASE__ : Optional[Any] =0
def __magic_name__ ( self : int ) -> int:
return self.seqlen - (self.fulfilled_idx + 1)
def __magic_name__ ( self : Optional[Any] , __lowercase : List[str]=False ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.seqlen
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.fulfilled_idx
SCREAMING_SNAKE_CASE__ : List[str] =self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __lowercase : List[List[int]] , __lowercase : Union[str, Any]=True ) -> int:
SCREAMING_SNAKE_CASE__ : Dict =max([len(__lowercase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE__ : int ={}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE__ : str =root
for tidx, token_id in enumerate(__lowercase ):
if token_id not in level:
SCREAMING_SNAKE_CASE__ : Optional[Any] ={}
SCREAMING_SNAKE_CASE__ : List[Any] =level[token_id]
if no_subsets and self.has_subsets(__lowercase , __lowercase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F" {nested_token_ids}." )
SCREAMING_SNAKE_CASE__ : str =root
def __magic_name__ ( self : int , __lowercase : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE__ : List[Any] =start[current_token]
SCREAMING_SNAKE_CASE__ : Tuple =list(start.keys() )
return next_tokens
def __magic_name__ ( self : str , __lowercase : str ) -> Dict:
SCREAMING_SNAKE_CASE__ : int =self.next_tokens(__lowercase )
return len(__lowercase ) == 0
def __magic_name__ ( self : str , __lowercase : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ : Optional[int] =list(root.values() )
if len(__lowercase ) == 0:
return 1
else:
return sum([self.count_leaves(__lowercase ) for nn in next_nodes] )
def __magic_name__ ( self : Tuple , __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict =self.count_leaves(__lowercase )
return len(__lowercase ) != leaf_count
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __init__( self : Union[str, Any] , __lowercase : List[List[int]] ) -> Any:
super(__lowercase , self ).__init__()
if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(__lowercase , __lowercase ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(__lowercase , __lowercase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =DisjunctiveTrie(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =nested_token_ids
SCREAMING_SNAKE_CASE__ : Any =self.trie.max_height
SCREAMING_SNAKE_CASE__ : str =[]
SCREAMING_SNAKE_CASE__ : Dict =False
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : str =self.trie.next_tokens(self.current_seq )
if len(__lowercase ) == 0:
return None
else:
return token_list
def __magic_name__ ( self : Any , __lowercase : int ) -> List[str]:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}" )
SCREAMING_SNAKE_CASE__ : str =self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __magic_name__ ( self : Tuple , __lowercase : int ) -> Optional[int]:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowercase )}" )
SCREAMING_SNAKE_CASE__ : Optional[Any] =False
SCREAMING_SNAKE_CASE__ : List[str] =False
SCREAMING_SNAKE_CASE__ : Any =False
if self.does_advance(__lowercase ):
self.current_seq.append(__lowercase )
SCREAMING_SNAKE_CASE__ : int =True
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
self.reset()
SCREAMING_SNAKE_CASE__ : int =self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE__ : List[Any] =completed
return stepped, completed, reset
def __magic_name__ ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ : Dict =False
SCREAMING_SNAKE_CASE__ : List[Any] =[]
def __magic_name__ ( self : int ) -> Optional[int]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[Any]=False ) -> Any:
SCREAMING_SNAKE_CASE__ : List[Any] =DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE__ : List[str] =self.seqlen
SCREAMING_SNAKE_CASE__ : Dict =self.current_seq
SCREAMING_SNAKE_CASE__ : Any =self.completed
return new_constraint
class __SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowercase : List[Constraint] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE__ : Dict =max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE__ : Optional[Any] =len(__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =False
self.init_state()
def __magic_name__ ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =[]
SCREAMING_SNAKE_CASE__ : Dict =None
SCREAMING_SNAKE_CASE__ : Any =[constraint.copy(stateful=__lowercase ) for constraint in self.constraints]
def __magic_name__ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ : List[str] =0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any =[]
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE__ : Tuple =constraint.advance()
if isinstance(__lowercase , __lowercase ):
token_list.append(__lowercase )
elif isinstance(__lowercase , __lowercase ):
token_list.extend(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any =self.inprogress_constraint.advance()
if isinstance(__lowercase , __lowercase ):
token_list.append(__lowercase )
elif isinstance(__lowercase , __lowercase ):
token_list.extend(__lowercase )
if len(__lowercase ) == 0:
return None
else:
return token_list
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[List[int]] ) -> Any:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.add(__lowercase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __magic_name__ ( self : Dict , __lowercase : int ) -> Dict:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =False, False
if self.completed:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =True
SCREAMING_SNAKE_CASE__ : int =False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.inprogress_constraint.update(__lowercase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowercase ) )
SCREAMING_SNAKE_CASE__ : Any =None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE__ : Dict =None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE__ : Optional[int] =True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =pending_constraint.update(__lowercase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =None
if not complete and stepped:
SCREAMING_SNAKE_CASE__ : Optional[int] =pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE__ : int =(
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE__ : List[str] =True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
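# A minimal usage sketch (illustrative): driving the state machine above during
# constrained decoding. It assumes the `PhrasalConstraint` companion class exported
# by transformers; the token ids are arbitrary.
if __name__ == "__main__":
    from transformers import PhrasalConstraint

    state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
    state.reset([5, 6])  # replay tokens generated so far
    complete, stepped = state.add(7)  # the phrase is now fulfilled
    assert complete and state.completed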
| 152 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the dotted module path for a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module defined by `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all model tester classes defined in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` that define model classes."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes appearing in the test classes of `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by `test_class`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes used to test `model_class` in `test_file`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to its test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make `o` (possibly classes or containers of classes) JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
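# A usage sketch (illustrative): mapping model classes to their testers for one
# model test file; the path below is an example, not part of the original module.
if __name__ == "__main__":
    bert_mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(bert_mapping))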
| 152 | 1 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
"""simple docstring"""
return "\n".join(
F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
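# Quick self-check of the formatting contract above (newline-joined lines, no
# trailing newline):
assert multiplication_table(5, 3) == "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15"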
| 364 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
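# A construction sketch (illustrative, runnable only inside the transformers source
# tree because of the relative imports above): `hidden_size` and
# `num_attention_heads` read through to `d_model` and `encoder_attention_heads`.
if __name__ == "__main__":
    config = DetrConfig(num_queries=50, d_model=128, encoder_attention_heads=4)
    assert config.hidden_size == 128 and config.num_attention_heads == 4
    print(list(DetrOnnxConfig(config).inputs.keys()))  # ['pixel_values', 'pixel_mask']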
| 338 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
    def __init__( self , vqvae: AutoencoderKL , unet: UNet2DConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising process: recover the noise that generates `images` (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
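# A sketch of the spherical interpolation helper above (illustrative): blending two
# noise latents halfway, e.g. to morph smoothly between two generated audio clips.
if __name__ == "__main__":
    x0, x1 = torch.randn(1, 64), torch.randn(1, 64)
    halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)
    print(halfway.shape)  # torch.Size([1, 64])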
| 19 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 1_6
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
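# A fixed-input sketch of the hybrid strategy above: quicksort with a median-of-3
# pivot, falling back to heapsort when the depth budget is exhausted and to
# insertion sort on small slices.
assert sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) == [
    1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79
]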
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsWarper:
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
'''simple docstring'''
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsProcessorList(list):
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
"""simple docstring"""
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
"""simple docstring"""
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
"""simple docstring"""
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value
    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True and penultimate_was_timestamp,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp,
            apply_max_initial_timestamp,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
return scores
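# A composition sketch (illustrative): chaining the warpers above and applying them
# to a dummy batch; vocabulary size and hyperparameters are arbitrary.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50), FlaxTopPLogitsWarper(top_p=0.9)]
    )
    input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
    scores = jax.random.normal(jax.random.PRNGKey(0), (2, 100))
    warped = processors(input_ids, scores, cur_len=4)
    print(warped.shape)  # (2, 100)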
| 150 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
"""simple docstring"""
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Optional[Any]= BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
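# A sketch of the `update_from_string` contract exercised by `test_config_from_string`
# above (illustrative values): ints, floats, bools and strings are parsed in place.
if __name__ == "__main__":
    c = GPT2Config()
    c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
    assert c.n_embd == 10 and c.resid_pdrop == 0.2 and c.scale_attn_weights is False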
| 150 | 1 |
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    """Check whether `number` contains each digit 1-9 exactly once."""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")
def solution() -> int | None:
    """Return the largest 1-9 pandigital concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"{solution() = }")
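# Why the multipliers work: for a 4-digit base b >= 5000, concat(b, 2*b) equals
# b * 100000 + 2*b = 100002 * b, and for a 3-digit base concat(b, 2*b, 3*b) equals
# 1002003 * b. The known maximum is 9327 -> 932718654.
assert is_9_pandigital(100002 * 9327)  # 932718654, i.e. concat(9327, 18654)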
| 51 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
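# A construction sketch (illustrative values, runnable only inside the transformers
# source tree because of the relative imports) that passes the validation above:
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    assert config.num_key_value_heads == config.num_attention_heads  # backward-compat fallback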
| 51 | 1 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.99_9995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,
    )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
trainer.train()
if __name__ == "__main__":
main()
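# An invocation sketch (illustrative; assumes this file is saved as run_pretrain.py,
# and the dataset/model names are examples): the flags map onto the dataclass fields
# above plus the standard `TrainingArguments`.
#
#   python run_pretrain.py \
#       --model_name_or_path="facebook/wav2vec2-base" \
#       --dataset_name="librispeech_asr" \
#       --dataset_config_name="clean" \
#       --output_dir="./wav2vec2-pretrained" \
#       --max_duration_in_seconds=20.0 \
#       --per_device_train_batch_size=8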
| 361 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
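# Quick numeric check for a unit edge (values rounded): surface area
# 3*sqrt(25 + 10*sqrt(5)) ~= 20.6458 and volume (15 + 7*sqrt(5))/4 ~= 7.6631.
if __name__ == "__main__":
    print(f"{dodecahedron_surface_area(1) = :.4f}")  # 20.6458
    print(f"{dodecahedron_volume(1) = :.4f}")  # 7.6631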
| 188 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(3_0).tolist()]})
return dset
    def test_add_faiss_index(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex ,i: {'''vecs''': i * np.ones(5 ,dtype=np.float32)} ,with_indices=True ,keep_in_memory=True)
        dset = dset.add_faiss_index('''vecs''' ,batch_size=1_0_0 ,metric_type=faiss.METRIC_INNER_PRODUCT)
        scores , examples = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.float32))
        self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''')
        dset.drop_index('''vecs''')
    def test_add_faiss_index_from_external_arrays(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 ,1) ,index_name='''vecs''' ,batch_size=1_0_0 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
        scores , examples = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.float32))
        self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''')
    def test_serialization(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 ,1) ,index_name='''vecs''' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('''vecs''' ,tmp_file.name)
            dset.load_faiss_index('''vecs2''' ,tmp_file.name)
        os.unlink(tmp_file.name)
        scores , examples = dset.get_nearest_examples('''vecs2''' ,np.ones(5 ,dtype=np.float32))
        self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''')
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 ,1) ,index_name='''vecs''')
        dset.drop_index('''vecs''')
        self.assertRaises(MissingIndex ,partial(dset.get_nearest_examples ,'''vecs2''' ,np.ones(5 ,dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch('''elasticsearch.Elasticsearch.search''') as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''') as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''') as mocked_bulk:
            mocked_index_create.return_value = {'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 3_0)
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 2_9}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('''filename''' ,es_client=es_client)
            scores , examples = dset.get_nearest_examples('''filename''' ,'''my_name-train_29''')
            self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''')
@require_faiss
class FaissIndexTest ( TestCase ):
    def test_flat_ip(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5 ,dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal ,5)
        index.add_vectors(np.zeros((5, 5) ,dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal ,1_0)
        # single query
        query = np.zeros(5 ,dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertRaises(ValueError ,index.search ,query.reshape(-1 ,1))
        self.assertGreater(scores[0] ,0)
        self.assertEqual(indices[0] ,1)
        # batched queries
        queries = np.eye(5 ,dtype=np.float32)[::-1]
        total_scores , total_indices = index.search_batch(queries)
        self.assertRaises(ValueError ,index.search_batch ,queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores) ,0)
        self.assertListEqual([4, 3, 2, 1, 0] ,best_indices)
    def test_factory(self):
        import faiss
        index = FaissIndex(string_factory='''Flat''')
        index.add_vectors(np.eye(5 ,dtype=np.float32))
        self.assertIsInstance(index.faiss_index ,faiss.IndexFlat)
        index = FaissIndex(string_factory='''LSH''')
        index.add_vectors(np.eye(5 ,dtype=np.float32))
        self.assertIsInstance(index.faiss_index ,faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='''Flat''' ,custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5 ,dtype=np.float32))
        self.assertIsInstance(index.faiss_index ,faiss.IndexFlat)
    def test_serialization(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5 ,dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5 ,dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertGreater(scores[0] ,0)
        self.assertEqual(indices[0] ,1)
@require_faiss
def test_serialization_fs(mockfs ):
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = '''index.faiss'''
    path = f'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest ( TestCase ):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch
        with patch('''elasticsearch.Elasticsearch.search''') as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''') as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'''acknowledged''': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['''foo''', '''bar''', '''foobar'''])
            # single query
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query)
            self.assertEqual(scores[0] ,1)
            self.assertEqual(indices[0] ,0)
            # single query with timeout
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query ,request_timeout=3_0)
            self.assertEqual(scores[0] ,1)
            self.assertEqual(indices[0] ,0)
            # batched queries
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) ,0)
            self.assertListEqual([1, 1, 1] ,best_indices)
            # batched queries with timeout
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries ,request_timeout=3_0)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) ,0)
            self.assertListEqual([1, 1, 1] ,best_indices)
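# --- Hedged stand-alone sketch (not part of the test suite): the FaissIndex
# wrapper outside unittest, using only calls exercised above. Kept commented so
# importing this test module stays side-effect free.
#   index = FaissIndex(string_factory='''Flat''')
#   index.add_vectors(np.eye(5 ,dtype=np.float32))
#   scores , ids = index.search(np.ones(5 ,dtype=np.float32))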
| 101 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self ,device ,seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def A__ ( self):
# if
lowercase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa)
lowercase = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa ,text_encoder=A__ ,tokenizer=A__)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
lowercase , lowercase = pipe_a.encode_prompt('''anime turtle''' ,device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase = None
lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(A__ ,A__ ,A__ ,A__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase = IFImgaImgPipeline(**pipe_a.components)
lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(A__ ,A__ ,A__ ,A__)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase = IFInpaintingPipeline(**pipe_a.components)
lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(A__ ,A__ ,A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
# pipeline 1
_start_torch_memory_measurement()
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(1)).to(A__)
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(A__ ,A__)
# pipeline 2
_start_torch_memory_measurement()
lowercase = torch.Generator(device='''cpu''').manual_seed(0)
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(0)).to(A__)
lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) ,rng=random.Random(1)).to(A__)
lowercase = pipe_a(
prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,mask_image=A__ ,original_image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A__ ,A__)
def UpperCamelCase ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
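# Usage pattern for the helper above (a sketch): reset the CUDA counters before
# each stage, run it, then read the high-water mark the assertions compare against.
#   _start_torch_memory_measurement()
#   ... run one pipeline stage ...
#   mem_bytes = torch.cuda.max_memory_allocated()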
| 101 | 1 |
import sys
import turtle
def get_mid(pa , pb):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa , vertexb , vertexc , depth , ):
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
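# Example invocation: python fractals.py 5
# Each recursion level replaces one triangle with three half-size copies, so a
# depth-d run draws on the order of 3**d outlines.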
| 356 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "▁eloquent" )
        self.assertEqual(len(vocab_keys ) , 30000 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1289] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
    def test_sequence_builders( self ):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
# fmt: off
lowerCamelCase_ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
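# Hedged usage sketch mirroring the pair-encoding test above (needs network
# access to the public "albert-base-v2" checkpoint; run manually, not under pytest):
if __name__ == "__main__":
    tok = AlbertTokenizer.from_pretrained("albert-base-v2" )
    pair_ids = tok.encode("sequence builders" , "multi-sequence build" )
    # layout: [CLS] tokens_a [SEP] tokens_b [SEP], as asserted in test_sequence_builders
    print(pair_ids )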
| 47 | 0 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__A : Dict = logging.getLogger(__name__)
__A : List[str] = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """simple docstring"""
    model_name_or_path: str = dataclasses.field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""})
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class STDataArguments:
    """simple docstring"""
    train_file: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""})
    infer_file: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""})
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """A csv or a json file containing the validation data."""})
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"""help""": """The name of the task to train on."""} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"""help""": """The list of labels for the task."""})
@dataclasses.dataclass
class STTrainingArguments:
    """simple docstring"""
    output_dir: str = dataclasses.field(
        metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""})
    eval_metric: Optional[str] = dataclasses.field(
        default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="""no""" , metadata={
            """help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"""help""": """Random seed for initialization."""} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , idalabel , next_data_dir ):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_top_rows = int(eval_result * len(dataset ) )
        print(num_top_rows )
        dataset = dataset.sort('''probability''' , reverse=True )
        dataset = dataset.select(range(num_top_rows ) )
    dataset = dataset.remove_columns(['''label''', '''probability'''] )
    dataset = dataset.rename_column('''prediction''' , '''label''' )
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f'train_pseudo.{args.data_file_extension}' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
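# Hedged mini-example of the confidence filter above (illustrative values only):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"prediction": ["pos", "neg"], "probability": [0.93, 0.41]})
#   kept = ds.filter(lambda example: example["probability"] > 0.9)  # one row survives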
def selftrain(model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
_UpperCAmelCase = {}
_UpperCAmelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_UpperCAmelCase = args.train_file
_UpperCAmelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_UpperCAmelCase = args.eval_file
for key in data_files:
_UpperCAmelCase = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
_UpperCAmelCase = extension
else:
assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_UpperCAmelCase = f'{args.output_dir}/self-train_iter-{{}}'.format
_UpperCAmelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = 0
_UpperCAmelCase = False
# Show the progress bar
_UpperCAmelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_UpperCAmelCase = data_dir_format(_SCREAMING_SNAKE_CASE )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''stage-1''' )
_UpperCAmelCase = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
arguments_dict.update({key: value} )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''best-checkpoint''' , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , _SCREAMING_SNAKE_CASE )
finetune(**_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(_SCREAMING_SNAKE_CASE )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , _SCREAMING_SNAKE_CASE )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''best-checkpoint''' )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''stage-2''' )
# Update arguments_dict
_UpperCAmelCase = model_path
_UpperCAmelCase = data_files['''train''']
_UpperCAmelCase = current_output_dir
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''best-checkpoint''' , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , _SCREAMING_SNAKE_CASE )
finetune(**_SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(_SCREAMING_SNAKE_CASE )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = iteration
_UpperCAmelCase = data_dir_format(iteration + 1 )
_UpperCAmelCase = AutoConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , '''best-checkpoint''' ) )
_UpperCAmelCase = config.idalabel
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''eval_results_best-checkpoint.json''' )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''test_results_best-checkpoint.json''' )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
_UpperCAmelCase = float(json.load(_SCREAMING_SNAKE_CASE )[args.eval_metric] )
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(_SCREAMING_SNAKE_CASE )
# Loading the dataset from local csv or json files.
_UpperCAmelCase = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_UpperCAmelCase = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , f'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , f'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , f'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_UpperCAmelCase = eval_result
if best_iteration is None:
_UpperCAmelCase = new_iteration
_UpperCAmelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_UpperCAmelCase = new_iteration
_UpperCAmelCase = new_eval_result
_UpperCAmelCase = 0
else:
if new_eval_result == best_eval_result:
_UpperCAmelCase = new_iteration
_UpperCAmelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_UpperCAmelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , _SCREAMING_SNAKE_CASE )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_SCREAMING_SNAKE_CASE , f'eval_results_iter-{iteration}.json' ) , os.path.join(_SCREAMING_SNAKE_CASE , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_SCREAMING_SNAKE_CASE , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_SCREAMING_SNAKE_CASE , '''eval_results_best-iteration.json''' ) , )
| 260 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime( number : int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums( n : int ):
    '''simple docstring'''
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if len(str(_SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def compute_truncated_primes( count : int = 11 ):
    '''simple docstring'''
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
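# Hedged check: the eleven two-way truncatable primes are commonly listed as
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, so solution() is
# expected to return their sum, 748317.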
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
| 260 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , num_labels=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
    def test_feed_forward_chunking( self ):
        pass
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
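# Hedged stand-alone inference sketch mirroring the integration test above. In
# the published transformers API the classes are named AutoImageProcessor and
# ConvNextV2ForImageClassification; network access to the checkpoint is required.
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 1000)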
| 207 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.block_size = 1_0
    def test_fit_to_block_sequence_too_small( self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_fit_exactly( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_fit_to_block_sequence_too_big( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
    def test_process_story_no_highlights( self ):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )
    def test_process_empty_story( self ):
        raw_story = ''
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )
    def test_process_story_with_highlights( self ):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines , summary_lines )
    def test_build_mask_no_padding( self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )
    def test_build_mask( self ):
        sequence = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 2_3 ).numpy() , expected.numpy() )
    def test_build_mask_with_padding_equal_to_one( self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )
    def test_compute_token_type_ids( self ):
        separator = 1_0_1
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
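# Behaviour of the helpers, as pinned down by the assertions above:
#   truncate_or_pad(seq, block_size, pad_id) clips to block_size or right-pads with pad_id,
#     e.g. truncate_or_pad([1, 2, 3, 4], 10, 0) -> [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
#   build_mask(seq, pad_id) marks real tokens with 1 and padding with 0
#   compute_token_type_ids(batch, sep_id) flips the segment id after each separator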
| 207 | 1 |
'''simple docstring'''
import os
def solution( ):
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:1_0]
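# A note on the expected input (hedged, inferred from the Project Euler 13 setup):
# num.txt holds one large integer per line; the function returns the first ten
# digits of their sum, which for the official puzzle input is 5537376230.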
if __name__ == "__main__":
print(solution())
| 83 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
def _lowerCAmelCase ( self : Optional[Any] ):
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE =mock.Mock()
SCREAMING_SNAKE_CASE =500
SCREAMING_SNAKE_CASE ={}
SCREAMING_SNAKE_CASE =HTTPError
SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=snake_case ) as mock_head:
SCREAMING_SNAKE_CASE =GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
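# Taken together, these tests document the public surface of Trie: add() registers
# tokens and split() cuts a string around the longest registered matches. A usage
# sketch built only from calls exercised above:
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add('[CLS]')
    demo_trie.add('extra_id_100')
    print(demo_trie.split('[CLS] This is a extra_id_100'))  # ['[CLS]', ' This is a ', 'extra_id_100']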
| 334 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BART tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
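# A short usage sketch for the class above. It assumes network access to the
# facebook/bart-base checkpoint listed in the vocabulary map:
if __name__ == "__main__":
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    enc = tok("Hello world")
    print(enc.input_ids)         # bos ... eos, per build_inputs_with_special_tokens above
    print(tok.decode(enc.input_ids))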
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
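# The sequence-builders assertions above pin down XLNet's post-processing template:
# special tokens go at the end, with sep id 4 and cls id 3. A minimal sketch of that
# template (illustrative, not the tokenizer's actual implementation):
def xlnet_inputs_sketch(token_ids_0, token_ids_1=None, sep=4, cls=3):
    # single sequence: A + [sep, cls]; pair: A + [sep] + B + [sep, cls]
    if token_ids_1 is None:
        return token_ids_0 + [sep, cls]
    return token_ids_0 + [sep] + token_ids_1 + [sep, cls]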
| 336 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # We will verify our results on an image from the COCO validation set.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
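# A hypothetical invocation of the converter above (the script filename and both paths
# are placeholders; only the flag names come from the argparse definition):
#
#   python convert_mobilenet_v1_tf_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt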
| 154 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.")
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.")
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.")
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.")
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.")
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.")
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.")
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.")
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
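# Example invocation, assuming this parser is registered as the `tpu-config`
# subcommand of the accelerate CLI (TPU name and zone are placeholders):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello from the pod" --install_accelerate --debug
#
# With --debug set, the assembled gcloud command is printed instead of executed.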
| 154 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path='facebook/mbart-large-en-ro', finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
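# A hypothetical run of the converter above (script filename and paths are placeholders):
#
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned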
| 359 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue: 7 days of inactivity since the bot's stale comment.
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Mark the issue as stale with a comment.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')


if __name__ == "__main__":
    main()
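# For readability, the schedule implied by the two branches above, as named constants
# (names are illustrative, not from the script):
#
#   STALE_AFTER_INACTIVITY_DAYS = 23   # post the stale comment
#   CLOSE_AFTER_BOT_COMMENT_DAYS = 7   # close if nobody replied to the stale comment
#   MIN_ISSUE_AGE_DAYS = 30            # never touch issues younger than this
#
# The script also assumes a GITHUB_TOKEN in the environment, e.g. when run from a
# scheduled CI job.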
| 222 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
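# With the lazy structure above, the heavy torch-dependent modules are only imported
# when an attribute is first touched, e.g. (module path assumed from the usual
# transformers layout):
#
#   import transformers.models.xmod as xmod   # cheap: nothing heavy imported yet
#   config = xmod.XmodConfig()                # first access triggers the real import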
| 166 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__ ( unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
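# For intuition, shift_tokens_right builds the decoder inputs by prepending the start
# token and dropping the last label. A rough numpy sketch (illustrative only):
#
#   import numpy as np
#
#   def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
#       shifted = np.roll(labels, 1, axis=-1)      # [a, b, c] -> [c, a, b]
#       shifted[:, 0] = decoder_start_token_id     # -> [start, a, b]
#       return np.where(shifted == -100, pad_token_id, shifted)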
| 243 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
def __lowerCamelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def __lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
__UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 1_0) )
__UpperCamelCase = pipe(generator=lowercase , encoding=lowercase )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
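# A minimal usage sketch mirroring the slow test above (assumes a CUDA device; same
# checkpoint and seed as the test):
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
#   output = pipe(generator=torch.Generator(device="cuda").manual_seed(42))
#   audio, image = output.audios[0], output.images[0]  # waveform plus its mel-spectrogram image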
| 243 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
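# Outside the test harness, xla_spawn follows the same launch pattern (paths and
# training arguments are illustrative):
#
#   python xla_spawn.py --num_cores 8 \
#       examples/pytorch/text-classification/run_glue.py \
#       --model_name_or_path distilbert-base-uncased --do_train --do_eval ...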
| 330 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
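# A usage sketch via the standard pipeline factory (Intel/dpt-large is a checkpoint
# commonly used for depth estimation; the result keys match postprocess() above):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")        # PIL image built in postprocess()
#   print(result["predicted_depth"].shape)   # raw depth tensor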
| 330 | 1 |
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt (float-based).

    >>> perfect_square(9)
    True
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square with an exact integer binary search.

    >>> perfect_square_binary_search(9)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
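# Both checks agree on small inputs; the binary-search variant stays exact for very
# large n where floating-point sqrt could round:
if __name__ == "__main__":
    assert perfect_square(16) and perfect_square_binary_search(16)
    assert not perfect_square_binary_search(15)
    assert perfect_square_binary_search((10**10 + 1) ** 2)  # exact beyond float precision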
| 245 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )

    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
@require_flax
class BlenderbotHeadTests( unittest.TestCase ):
    """simple docstring"""

    vocab_size = 99

    def _get_config_and_data( self ):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def test_lm_forward( self ):
        """simple docstring"""
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )

    def test_lm_uneven_forward( self ):
        """simple docstring"""
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )

    def test_shift_tokens_right( self ):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotSmallModelTester(self )

    def test_use_cache_forward( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def test_decode( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )

                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
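# Hedged note (added): in a standard transformers checkout these tests would be
# collected by pytest, e.g.
#   python -m pytest tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py
# The test-file path is an assumption inferred from the imports above; the @slow
# test additionally downloads facebook/blenderbot_small-90M.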
| 245 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-5

    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 12
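# Hedged usage sketch (added): constructing the config with its defaults and
# reading a couple of mapped attributes; `hidden_size` resolves to `d_model`
# through `attribute_map` above.
if __name__ == "__main__":
    cfg = TableTransformerConfig()
    print(cfg.model_type, cfg.hidden_size, cfg.num_attention_heads)  # table-transformer 256 8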
| 343 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        # custom Python pre-tokenizers cannot be serialized, so swap in a plain one before saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
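# Hedged usage sketch (added; network access and the Jieba/rjieba dependency of
# the slow RoFormer tokenizer are assumed):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好。"))
# Note that `save_pretrained` above swaps the custom Jieba pre-tokenizer for a
# plain `BertPreTokenizer`, since custom Python pre-tokenizers cannot be
# serialized into `tokenizer.json`.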
| 80 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
"""simple docstring"""
    def one_complete_example( self , complete_file_name: str , parser_only: bool , secondary_filename: str = None , special_strings: list = None ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        examples_path = os.path.abspath("""examples""" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , parser_only )
                        diff = """\n""".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , """""" )
                        self.assertEqual(diff , """""" )

    def test_nlp_examples( self ):
        self.one_complete_example("""complete_nlp_example.py""" , True )
        self.one_complete_example("""complete_nlp_example.py""" , False )

    def test_cv_examples( self ):
        cv_path = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        special_strings = [
            """ """ * 16 + """{\n\n""",
            """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
            """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
            """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
            """ """ * 20 + """\"epoch\": epoch,\n\n""",
            """ """ * 16 + """},\n\n""",
            """ """ * 16 + """step=epoch,\n""",
            """ """ * 12,
            """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
        ]
        self.one_complete_example("""complete_cv_example.py""" , True , cv_path , special_strings )
        self.one_complete_example("""complete_cv_example.py""" , False , cv_path , special_strings )
@mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '1'})
class FeatureExamplesTests(TempDirTestCase):
    """simple docstring"""

    clear_on_setup = False
@classmethod
    def setUpClass( cls ):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""" )

        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch( self ):
        testargs = F'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            '.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )

    def test_checkpointing_by_steps( self ):
        testargs = F'\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            '.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )

    def test_load_states_by_epoch( self ):
        testargs = F'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n            '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn("""epoch 0:""" , output )
        self.assertIn("""epoch 1:""" , output )
    def test_load_states_by_steps( self ):
        testargs = F'\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n            '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
        else:
            self.assertIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
@slow
    def test_cross_validation( self ):
        testargs = """\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        """.split()
        with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall("""({.+})""" , output )
            results = [r for r in results if """accuracy""" in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results["""accuracy"""] , 0.75 )

    def test_multi_process_metrics( self ):
        testargs = ["""examples/by_feature/multi_process_metrics.py"""]
        run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_tracking( self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n                '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , """tracking""" ) ) )

    def test_gradient_accumulation( self ):
        testargs = ["""examples/by_feature/gradient_accumulation.py"""]
        run_command(self._launch_args + testargs )

    def test_local_sgd( self ):
        testargs = ["""examples/by_feature/local_sgd.py"""]
        run_command(self._launch_args + testargs )
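# Hedged note (added): each test above shells out through `run_command`, so a
# single case can be exercised in isolation with, e.g.
#   python -m pytest tests/test_examples.py -k test_checkpointing_by_epoch
# (the exact test-file path is an assumption; it is not stated in this snippet).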
| 354 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        """In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""config""" , description=description )
    else:
        parser = argparse.ArgumentParser("""Accelerate config command""" , description=description )

    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )

    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser


def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file

    if config_file.endswith(""".json""" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'accelerate configuration saved at {config_file}' )


def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
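# Hedged CLI sketch (added): this module backs the `accelerate config`
# subcommand, so typical invocations would be
#   accelerate config
#   accelerate config --config_file /tmp/my_config.yaml
# or running this file directly, which goes through `main()` above.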
| 89 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    model_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs
):
    """Save max(src_len, tgt_len) for each example so batches can be length-grouped."""
    tok = AutoTokenizer.from_pretrained(model_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''train''', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad).sum(1).tolist()
            tgt_lens = batch['''labels'''].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='''val''', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
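# Hedged CLI sketch (added; the layout under `data_dir` is an assumption —
# whatever `SeqaSeqDataset` expects, typically train.source/train.target etc.):
#   python save_len_file.py t5-small path/to/data_dir --max_source_length 512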
| 160 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)


def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    '''simple docstring'''

    dataclass_types: Iterable[DataClassType]

    def __init__( self , dataclass_types: Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser: ArgumentParser , field: dataclasses.Field ):
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )

        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]

        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]

            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )

            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )

            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype: DataClassType ):
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise

        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )

                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )

    def parse_json_file( self , json_file , allow_extra_keys = False ):
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )

    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
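# Hedged usage sketch (added): a minimal dataclass parsed with the class above;
# the field names and values here are illustrative, not from the original file.
if __name__ == "__main__":

    @dataclasses.dataclass
    class TrainingArgs:
        learning_rate: float = HfArg(default=1e-4, help="Peak learning rate.", aliases=["--lr"])
        do_eval: bool = False  # booleans get string_to_bool handling automatically

    parser = HfArgumentParser(TrainingArgs)
    (training_args,) = parser.parse_args_into_dataclasses(args=["--lr", "3e-5"])
    print(training_args.learning_rate)  # 3e-05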
| 160 | 1 |
import sys
_lowercase: List[str] = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution( n : str = _lowercase ) -> int:
    """Return the greatest product of thirteen adjacent digits in the given number string."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
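# Added spot-check (hedged): 23514624000 is the published answer for Project
# Euler problem 8 (13 adjacent digits), so the solver can be verified with
#   assert solution() == 23514624000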
| 71 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """simple docstring"""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], )

    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """simple docstring"""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
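# Added round-trip check (a minimal sketch): each `reverse_*` helper above
# undoes its `correct_*` counterpart, e.g. for a 1x8 weight
#   x = torch.arange(8.0).reshape(1, 8)
#   assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)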
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
# assert values
if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub')
        model.push_to_hub(f'openmmlab/{model_name}')
        processor.push_to_hub(f'openmmlab/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 71 | 1 |
"""simple docstring"""
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs):
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards, max_num_jobs):
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs, max_num_jobs):
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list):
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng, gen_kwargs):
    # Lists of the same size get the same shuffling, so entangled lists stay aligned
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
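# Hedged usage sketch (added): splitting generator kwargs into two jobs and
# merging them back; only lists are treated as shardable data sources.
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "split": "train"}
    shards = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    print(shards)  # [{'files': ['a.txt', 'b.txt'], 'split': 'train'}, {'files': ['c.txt'], 'split': 'train'}]
    assert _merge_gen_kwargs(shards) == gen_kwargs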
| 61 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Renames a single fairseq MusicGen state dict key to its Hugging Face equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq state dict to HF conventions and partitions it into the decoder (LM)
    state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
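
# Quick sanity sketch for the fused-qkv split above (illustrative values, not from the
# original script): a (3*H, H) in_proj matrix separates into three (H, H) projections.
#   hidden = 4
#   fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
#   # q, k, v each have shape (hidden, hidden) and together reconstruct `fused`.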
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
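
# Hypothetical invocation (the file name is assumed; the flags match the parser above):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu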
| 130 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 329 | 0 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
| 41 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
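
# Worked example for the formula in _DESCRIPTION above (word-level alignment hedged
# against jiwer's Levenshtein implementation):
#   references  = ["this is the reference", "there is another one"]       -> N = 8 reference words
#   predictions = ["this is the prediction", "there is an other sample"]  -> S + D + I = 4 edits
#   WER = 4 / 8 = 0.5, matching the doctest in _KWARGS_DESCRIPTION.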
| 253 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE) , x.transpose()))
__lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = np.random.randn(3 , 4)
__lowerCAmelCase : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE) , transpose(_SCREAMING_SNAKE_CASE).numpy()))
__lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : Any = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)).numpy()))
@require_tf
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = np.random.randn(3 , 4)
__lowerCAmelCase : Any = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE) , transpose(_SCREAMING_SNAKE_CASE).numpy()))
__lowerCAmelCase : Dict = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : str = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)).numpy()))
@require_flax
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = np.random.randn(3 , 4)
__lowerCAmelCase : Dict = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE) , np.asarray(transpose(_SCREAMING_SNAKE_CASE))))
__lowerCAmelCase : Optional[int] = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : Optional[int] = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)) , np.asarray(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0)))))
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3)) , np.reshape(_SCREAMING_SNAKE_CASE , (4, 3))))
__lowerCAmelCase : Dict = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5)) , np.reshape(_SCREAMING_SNAKE_CASE , (12, 5))))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Any) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = np.random.randn(3 , 4)
__lowerCAmelCase : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3)) , reshape(_SCREAMING_SNAKE_CASE , (4, 3)).numpy()))
__lowerCAmelCase : List[str] = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5)) , reshape(_SCREAMING_SNAKE_CASE , (12, 5)).numpy()))
@require_tf
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Dict = np.random.randn(3 , 4)
__lowerCAmelCase : Tuple = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3)) , reshape(_SCREAMING_SNAKE_CASE , (4, 3)).numpy()))
__lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : Union[str, Any] = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5)) , reshape(_SCREAMING_SNAKE_CASE , (12, 5)).numpy()))
@require_flax
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = np.random.randn(3 , 4)
__lowerCAmelCase : Tuple = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3)) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (4, 3)))))
__lowerCAmelCase : Tuple = np.random.randn(3 , 4 , 5)
__lowerCAmelCase : List[str] = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (12, 5)) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (12, 5)))))
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : Any = np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE) , np.squeeze(_SCREAMING_SNAKE_CASE)))
__lowerCAmelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2) , np.squeeze(_SCREAMING_SNAKE_CASE , axis=2)))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = np.random.randn(1 , 3 , 4)
__lowerCAmelCase : Any = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE) , squeeze(_SCREAMING_SNAKE_CASE).numpy()))
__lowerCAmelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase : Dict = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2) , squeeze(_SCREAMING_SNAKE_CASE , axis=2).numpy()))
@require_tf
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = np.random.randn(1 , 3 , 4)
__lowerCAmelCase : int = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE) , squeeze(_SCREAMING_SNAKE_CASE).numpy()))
__lowerCAmelCase : str = np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase : Union[str, Any] = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2) , squeeze(_SCREAMING_SNAKE_CASE , axis=2).numpy()))
@require_flax
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = np.random.randn(1 , 3 , 4)
__lowerCAmelCase : List[str] = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE))))
__lowerCAmelCase : Tuple = np.random.randn(1 , 4 , 1 , 5)
__lowerCAmelCase : Union[str, Any] = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE , axis=2))))
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1)))
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Any) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4)
__lowerCAmelCase : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1).numpy()))
@require_tf
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
__lowerCAmelCase : str = np.random.randn(3 , 4)
__lowerCAmelCase : int = tf.constant(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1).numpy()))
@require_flax
def _SCREAMING_SNAKE_CASE ( self: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4)
__lowerCAmelCase : Any = jnp.array(_SCREAMING_SNAKE_CASE)
self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1))))
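
# A minimal sketch of the flatten_dict behaviour exercised by test_flatten_dict above
# (illustrative only; the real implementation lives in transformers.utils):
def _flatten_dict_sketch(d, parent_key="", delimiter="."):
    items = {}
    for key, value in d.items():
        new_key = f"{parent_key}{delimiter}{key}" if parent_key else str(key)
        if isinstance(value, dict):
            # recurse into nested dicts, joining keys with the delimiter
            items.update(_flatten_dict_sketch(value, new_key, delimiter))
        else:
            items[new_key] = value
    return items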
| 356 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
__lowerCAmelCase : str = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with="all" ,project_dir=args.project_dir )
else:
__lowerCAmelCase : Any = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase : int = config["lr"]
__lowerCAmelCase : Union[str, Any] = int(config["num_epochs"] )
__lowerCAmelCase : Tuple = int(config["seed"] )
__lowerCAmelCase : Tuple = int(config["batch_size"] )
__lowerCAmelCase : int = config["image_size"]
if not isinstance(__snake_case ,(list, tuple) ):
__lowerCAmelCase : Tuple = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,"isdigit" ):
if args.checkpointing_steps == "epoch":
__lowerCAmelCase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__lowerCAmelCase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__lowerCAmelCase : int = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__lowerCAmelCase : Dict = os.path.split(__snake_case )[-1].split("." )[0]
accelerator.init_trackers(__snake_case ,__snake_case )
# Grab all the image filenames
__lowerCAmelCase : Union[str, Any] = [os.path.join(args.data_dir ,__snake_case ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
__lowerCAmelCase : Union[str, Any] = [extract_label(__snake_case ) for fname in file_names]
__lowerCAmelCase : Any = list(set(__snake_case ) )
id_to_label.sort()
__lowerCAmelCase : Optional[Any] = {lbl: i for i, lbl in enumerate(__snake_case )}
# Set the seed before splitting the data.
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# Split our filenames between train and validation
__lowerCAmelCase : List[str] = np.random.permutation(len(__snake_case ) )
__lowerCAmelCase : Dict = int(0.8 * len(__snake_case ) )
__lowerCAmelCase : str = random_perm[:cut]
__lowerCAmelCase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__lowerCAmelCase : str = Compose([RandomResizedCrop(__snake_case ,scale=(0.5, 1.0) ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# For evaluation, we use a deterministic Resize
__lowerCAmelCase : Union[str, Any] = Compose([Resize(__snake_case ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# Instantiate dataloaders.
__lowerCAmelCase : Union[str, Any] = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
__lowerCAmelCase : Any = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase : int = create_model("resnet50d" ,pretrained=__snake_case ,num_classes=len(__snake_case ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase : List[str] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCAmelCase : Any = False
for param in model.get_classifier().parameters():
__lowerCAmelCase : List[Any] = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCAmelCase : List[Any] = OneCycleLR(optimizer=__snake_case ,max_lr=__snake_case ,epochs=__snake_case ,steps_per_epoch=len(__snake_case ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCAmelCase : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCAmelCase : Optional[int] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCAmelCase : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCAmelCase : str = os.path.splitext(__snake_case )[0]
if "epoch" in training_difference:
__lowerCAmelCase : Dict = int(training_difference.replace("epoch_" ,"" ) ) + 1
__lowerCAmelCase : Optional[Any] = None
else:
__lowerCAmelCase : Any = int(training_difference.replace("step_" ,"" ) )
__lowerCAmelCase : Optional[int] = resume_step // len(__snake_case )
resume_step -= starting_epoch * len(__snake_case )
# Now we train the model
for epoch in range(__snake_case ,__snake_case ):
model.train()
if args.with_tracking:
__lowerCAmelCase : Any = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCAmelCase : Optional[int] = accelerator.skip_first_batches(__snake_case ,__snake_case )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCAmelCase : Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Union[str, Any] = (batch["image"] - mean) / std
__lowerCAmelCase : Optional[int] = model(__snake_case )
__lowerCAmelCase : List[str] = torch.nn.functional.cross_entropy(__snake_case ,batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCAmelCase : Tuple = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
model.eval()
__lowerCAmelCase : int = 0
__lowerCAmelCase : Optional[int] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Optional[Any] = (batch["image"] - mean) / std
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(__snake_case )
__lowerCAmelCase : List[str] = outputs.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["label"]) )
__lowerCAmelCase : str = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCAmelCase : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
} ,step=__snake_case ,)
if checkpointing_steps == "epoch":
__lowerCAmelCase : Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
__lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir" ,required=__snake_case ,help="The data folder on disk." )
parser.add_argument("--fp16" ,action="store_true" ,help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" ,type=__snake_case ,default=__snake_case ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" ,type=__snake_case ,default=__snake_case ,help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." ,)
parser.add_argument(
"--output_dir" ,type=__snake_case ,default="." ,help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." ,)
parser.add_argument(
"--resume_from_checkpoint" ,type=__snake_case ,default=__snake_case ,help="If the training should continue from a checkpoint folder." ,)
parser.add_argument(
"--with_tracking" ,action="store_true" ,help="Whether to load in all available experiment trackers from the environment and use them for logging." ,)
parser.add_argument(
"--project_dir" ,type=__snake_case ,default="logs" ,help="Location on where to store experiment tracking logs` and relevent project information" ,)
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
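
# Hypothetical invocation (the script name is assumed; the flags match the parser above):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 --checkpointing_steps epoch --with_tracking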
| 58 | 0 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
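
# A minimal sketch of the alignment rule the first test exercises (illustrative; the real
# helper is transformers.utils.backbone_utils.get_aligned_output_features_output_indices):
def _align_sketch(out_features, out_indices, stage_names):
    if out_indices is None and out_features is None:
        out_features = [stage_names[-1]]  # default to the last stage
    if out_indices is None:
        out_indices = [stage_names.index(f) for f in out_features]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    return out_features, out_indices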
| 151 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
def UpperCAmelCase_ ( self : str ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ )
UpperCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : str = [*signature.parameters.keys()]
UpperCAmelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
def check_hidden_states_output(lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
UpperCAmelCase : Union[str, Any] = model_class(lowercase_ )
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : List[Any] = layer_type
UpperCAmelCase : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
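
# Illustrative follow-up (hedged, not from the original test): the top-1 ImageNet class for
# the COCO cats image above could be read off the verified logits, e.g.:
#   predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
#   print(model.config.id2label[predicted_class])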
| 151 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
def SCREAMING_SNAKE_CASE ( self , __A , __A=False , __A=20 , __A=5 ) -> Optional[int]:
a =[(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )) for i in range(len(__lowercase ) )]
a =list(filter(lambda __A : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowercase ) , __lowercase ) )
if max_length is not None and len(__lowercase ) > max_length:
a =toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
a =toks + toks
# toks_str = [t[1] for t in toks]
a =[t[0] for t in toks]
# Ensure consistency
a =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
a =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
a =''' ''' + output_txt
a =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
a =tokenizer('''m xxx ɪ''' , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
a =tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
a =tokenizer('''maɪ c''' , do_phonemize=__lowercase ).input_ids
self.assertEqual(__lowercase , [3, 200] ) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
a ='''Hello how are you'''
a =tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowercase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
a ='''Hello how are you'''
a =tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowercase ).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase ).input_ids )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
a ='''Hello how are you'''
a =tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''' )
a =tokenizer.decode(tokenizer(__lowercase ).input_ids )
self.assertEqual(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
a =[
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
a =tokenizer.decode(sample_ids[0] )
a =tokenizer.batch_decode(__lowercase )
self.assertEqual(__lowercase , batch_tokens[0] )
self.assertEqual(__lowercase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
a ='''Hello how are you'''
a =tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowercase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
a ='''Hello how are you'''
a =tokenizer.phonemize(__lowercase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowercase ).input_ids , tokenizer(__lowercase , do_phonemize=__lowercase ).input_ids )
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None)
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaPhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16])
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17])
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 215 | 0 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
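    # Minimal usage sketch (illustrative values): pass exactly one quantity as 0
    # and the function solves for it.
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}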
| 37 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
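# Usage sketch (checkpoint name taken from the map above; the output ids shown
# are not verified): the conversation helper concatenates each turn's ids
# followed by eos, then truncates from the left if the result is too long.
# tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# ids = tokenizer("Hello world").input_ids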
| 224 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    """simple docstring"""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def test_inspect_metric(path, tmp_path):
    """simple docstring"""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """simple docstring"""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """simple docstring"""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def test_get_dataset_config_names(path, expected):
    """simple docstring"""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """simple docstring"""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    """simple docstring"""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """simple docstring"""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 355 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
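    # Optional sanity check (a sketch, not part of the original solution):
    # estimate the same expectation by simulation; the analytic answer printed
    # above is 6.818741802, and the estimate should land close to it.
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    trials = 100_000
    estimate = sum(len(set(random.sample(balls, 20))) for _ in range(trials)) / trials
    print(f"monte-carlo estimate: {estimate:.3f}")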
| 329 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        """simple docstring"""
        # `to_kwargs` returns only the fields that differ from their defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        """simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        """simple docstring"""
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
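    # For reference (behavior exercised by the unit test above): KwargsHandler
    # dataclasses serialize only the fields that differ from their defaults, e.g.
    # MockClass(a=2, c=2.25).to_kwargs() -> {"a": 2, "c": 2.25}.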
| 133 |
def solution(pence: int = 200) -> int:
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
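    # Spot checks for small amounts (verified by hand): two ways to make 2p
    # (1+1 and 2) and four ways to make 5p (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(2) == 2
    assert solution(5) == 4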
| 117 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
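# Worked index example for the array-backed heap: for position 4 the parent is
# (4 - 1) // 2 == 1 and the children sit at 2*4 + 1 == 9 and 2*4 + 2 == 10.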
class MinPriorityQueue(Generic[T]):
    def __init__(self):
        self.heap = []
        self.position_map = {}
        self.elements = 0

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    def is_empty(self):
        return self.elements == 0

    def push(self, elem, weight):
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos, node2_pos):
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        self.connections = {}
        self.nodes = 0

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        # add the node to the graph if it is not already present
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        # add an undirected edge between two nodes
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict, dict]:
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}

    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
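if __name__ == "__main__":
    # Usage sketch (illustrative weights): the parent map encodes the minimum
    # spanning tree edges chosen by the algorithm.
    g = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 15)
    dist, parent = prims_algo(g)
    print(parent)  # {'a': None, 'b': 'a', 'c': 'b'}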
| 354 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor ranging from 0 to 1."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
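# Round-trip sketch (not part of the original file): converting to ±1 bit
# planes and back reproduces the 8-bit quantized values.
# x = torch.rand(1, 3, 8, 8)
# assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)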
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # swap in the bit-aware step function matching the scheduler type
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator)
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            model_output = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
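# Usage sketch (`my_unet` is hypothetical; any DDIM/DDPM scheduler works):
# scheduler = DDIMScheduler(num_train_timesteps=1000)
# pipe = BitDiffusion(unet=my_unet, scheduler=scheduler, bit_scale=1.0)
# image = pipe(height=64, width=64, num_inference_steps=50).images[0]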
| 129 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_snake_case = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_snake_case = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self, predictions, references, min_len=1, max_len=4):
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 250 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
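# Cross-check sketch: Python 3.8+ exposes the same quantity as math.comb, e.g.
# combinations(52, 5) == math.comb(52, 5) == 2598960.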
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 250 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
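# Composition sketch (all defaults assumed): build the composite config from
# explicitly constructed sub-configs.
# text_config = PixaStructTextConfig()
# vision_config = PixaStructVisionConfig()
# config = PixaStructConfig.from_text_vision_configs(text_config, vision_config)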
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
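

# Runtime behavior of the _LazyModule pattern above (illustrative module path): the module
# body only records names, and the first attribute access triggers the real submodule import.
#
#     from transformers.models import pix2struct    # cheap - nothing heavy is imported yet
#     cfg_cls = pix2struct.Pix2StructConfig         # now configuration_pix2struct actually loads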
| 113 | 0 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if the bracket sequence `s` is balanced."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
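

# Representative cases for is_balanced (doctest style):
#
#     >>> is_balanced("([]{})")
#     True
#     >>> is_balanced("([)]")
#     False
#     >>> is_balanced("((()")
#     False
#     >>> is_balanced("")
#     True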
| 34 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency-list dict) contains a cycle."""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge to a vertex on the current stack."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
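

# A quick demonstration on two hypothetical adjacency lists: the first graph has the
# back edge 2 -> 0 and therefore a cycle, the second is a DAG.
example_cyclic = {0: [1], 1: [2], 2: [0]}
example_acyclic = {0: [1, 2], 1: [2], 2: []}
assert check_cycle(example_cyclic) is True
assert check_cycle(example_acyclic) is False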
| 188 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload on unpickle
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
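

# Usage sketch (the checkpoint name comes from the pretrained map above; loading it
# downloads the SentencePiece model - everything else here is illustrative):
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("Crime and Punishment.")["input_ids"]
#     text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))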
| 166 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs doctests over the files in `directory` selected by `identifier`/`n_identifier`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 166 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 271 |
def is_even(number: int) -> bool:
    """Return True if the given integer is even, using a bit mask."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
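

# The bit trick in one line: the least-significant bit of a two's-complement integer is 0
# exactly for even numbers (negatives included), so the mask decides parity in O(1).
#
#     >>> [is_even(n) for n in (0, 1, -2, 7)]
#     [True, False, True, False]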
| 90 | 0 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class A__ :
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
A_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ , A_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
A_ = {"""vision_model""": vision_model, """text_model""": text_model}
A_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
A_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ , A_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
A_ = {"""vision_model""": vision_model, """text_model""": text_model}
A_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
A_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ )
A_ = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = after_output[0]
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ , A_ = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ )
A_ = {"""vision_model""": vision_model, """text_model""": text_model}
A_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ )
A_ = model(
input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ )
A_ = output.vision_model_output.attentions
self.assertEqual(len(UpperCamelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A_ = output.text_model_output.attentions
self.assertEqual(len(UpperCamelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
pt_model.to(UpperCamelCase__ )
pt_model.eval()
# prepare inputs
A_ = inputs_dict
A_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
A_ = pt_model(**UpperCamelCase__ ).to_tuple()
A_ = fx_model(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
A_ = fx_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase__ )
A_ = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ )
pt_model_loaded.to(UpperCamelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
A_ = pt_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(UpperCamelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
A_ = VisionTextDualEncoderModel(UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
A_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ )
A_ = fx_state
self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ )
A_ = VisionTextDualEncoderModel(UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel(UpperCamelCase__ )
A_ = load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params )
self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
self.check_save_load(**UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**UpperCamelCase__ )
@is_pt_flax_cross_test
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ = config_inputs_dict.pop("""vision_config""" )
A_ = config_inputs_dict.pop("""text_config""" )
A_ = config_inputs_dict
self.check_equivalence_pt_to_flax(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.check_equivalence_flax_to_pt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ , A_ = self.get_pretrained_model_and_inputs()
A_ = model_a(**UpperCamelCase__ )
A_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(UpperCamelCase__ )
A_ = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ )
A_ = model_a(**UpperCamelCase__ )
A_ = after_outputs[0]
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
@require_flax
class A__ ( _snake_case , unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = FlaxViTModel(UpperCamelCase__ )
A_ = FlaxBertModel(UpperCamelCase__ )
return vision_model, text_model
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = FlaxViTModelTester(self )
A_ = FlaxBertModelTester(self )
A_ = vit_model_tester.prepare_config_and_inputs()
A_ = bert_model_tester.prepare_config_and_inputs()
A_ , A_ = vision_config_and_inputs
A_ , A_ , A_ , A_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( _snake_case , unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = FlaxCLIPVisionModel(UpperCamelCase__ )
A_ = FlaxBertModel(UpperCamelCase__ )
return vision_model, text_model
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = FlaxCLIPVisionModelTester(self )
A_ = FlaxBertModelTester(self )
A_ = clip_model_tester.prepare_config_and_inputs()
A_ = bert_model_tester.prepare_config_and_inputs()
A_ , A_ = vision_config_and_inputs
A_ , A_ , A_ , A_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
A_ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A_ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""np""" )
A_ = model(**UpperCamelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A_ = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1e-3 ) )
| 101 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A_ = []
for i in range(len(self.block_out_channels ) - 1 ):
A_ = self.block_out_channels[i]
A_ = self.block_out_channels[i + 1]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = blocks
A_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.conv_in(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
for block in self.blocks:
A_ = block(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return embedding
@flax_register_to_config
class A__ ( nn.Module , _snake_case , _snake_case ):
lowercase = 32
lowercase = 4
lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase = False
lowercase = (320, 640, 1_280, 1_280)
lowercase = 2
lowercase = 8
lowercase = None
lowercase = 1_280
lowercase = 0.0
lowercase = False
lowercase = jnp.floataa
lowercase = True
lowercase = 0
lowercase = "rgb"
lowercase = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.block_out_channels
A_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ = self.num_attention_heads or self.attention_head_dim
# input
A_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A_ = FlaxTimestepEmbedding(UpperCamelCase__ , dtype=self.dtype )
A_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A_ = self.only_cross_attention
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (num_attention_heads,) * len(self.down_block_types )
# down
A_ = []
A_ = []
A_ = block_out_channels[0]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A_ = FlaxDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCamelCase__ )
for _ in range(self.layers_per_block ):
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
if not is_final_block:
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
A_ = down_blocks
A_ = controlnet_down_blocks
# mid
A_ = block_out_channels[-1]
A_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1.0 , UpperCamelCase__ = True , UpperCamelCase__ = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
A_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A_ = jnp.flip(UpperCamelCase__ , axis=1 )
# 1. time
if not isinstance(UpperCamelCase__ , jnp.ndarray ):
A_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ = timesteps.astype(dtype=jnp.floataa )
A_ = jnp.expand_dims(UpperCamelCase__ , 0 )
A_ = self.time_proj(UpperCamelCase__ )
A_ = self.time_embedding(UpperCamelCase__ )
# 2. pre-process
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.conv_in(UpperCamelCase__ )
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.controlnet_cond_embedding(UpperCamelCase__ )
sample += controlnet_cond
# 3. down
A_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
else:
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
# 5. contronet blocks
A_ = ()
for down_block_res_sample, controlnet_block in zip(UpperCamelCase__ , self.controlnet_down_blocks ):
A_ = controlnet_block(UpperCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
A_ = controlnet_down_block_res_samples
A_ = self.controlnet_mid_block(UpperCamelCase__ )
# 6. scaling
A_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCamelCase__ , mid_block_res_sample=UpperCamelCase__ )
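

# How a pipeline typically consumes these outputs (assumed API, following the diffusers
# Flax convention; the names below are illustrative): the per-resolution residuals are
# added to the UNet skip connections, already scaled by `conditioning_scale`.
#
#     down_res, mid_res = controlnet.apply({"params": cn_params}, sample, t, ctx, cond,
#                                          return_dict=False)
#     noise_pred = unet.apply({"params": unet_params}, sample, t, ctx,
#                             down_block_additional_residuals=down_res,
#                             mid_block_additional_residual=mid_res).sample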
| 101 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = XLMRobertaTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
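        # Note on the id arithmetic above: the fairseq vocab reserves ids 0-3 for
        # <s>/<pad>/</s>/<unk>, so every raw SentencePiece id is shifted by
        # tokenizer.fairseq_offset (1 for XLM-R); pieces missing from the SentencePiece
        # model collapse to its unk id 2, which becomes 2 + 1 = 3 after the shift.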
def __UpperCAmelCase ( self : List[Any] ) -> str:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.save_pretrained(UpperCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.from_pretrained(UpperCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
shutil.rmtree(UpperCAmelCase__ )
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase__ , f.name )
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = pickle.dumps(UpperCAmelCase__ )
pickle.loads(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = 'I was born in 92000, and this is falsé.'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase = 'Hello World!'
lowerCAmelCase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
lowerCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : str ) -> Tuple:
# fmt: off
lowerCAmelCase = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 4 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list, current_sequence: list, index: int, index_used: list
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly len(sequence) - index children; recursion stops
    when the current permutation is complete.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
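

# For comparison, the standard library enumerates the same n! orderings (as tuples)
# without explicit backtracking:
import itertools

for permutation in itertools.permutations([3, 1, 2, 4]):
    print(list(permutation))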
| 69 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 365 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(A__ : List[str] ):
return list(itertools.chain(*A__ ) )
if equal_length:
_snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def UpperCamelCase_ ( self : int ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(A__ , '''feature_size''' ) )
self.assertTrue(hasattr(A__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(A__ , '''hop_length''' ) )
self.assertTrue(hasattr(A__ , '''chunk_length''' ) )
self.assertTrue(hasattr(A__ , '''sampling_rate''' ) )
def UpperCamelCase_ ( self : Any ) -> Union[str, Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
_snake_case = self.feature_extraction_class.from_pretrained(A__ )
_snake_case = feat_extract_first.to_dict()
_snake_case = feat_extract_second.to_dict()
_snake_case = dict_first.pop('''mel_filters''' )
_snake_case = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def UpperCamelCase_ ( self : int ) -> Union[str, Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case = os.path.join(A__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A__ )
_snake_case = self.feature_extraction_class.from_json_file(A__ )
_snake_case = feat_extract_first.to_dict()
_snake_case = feat_extract_second.to_dict()
_snake_case = dict_first.pop('''mel_filters''' )
_snake_case = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def UpperCamelCase_ ( self : Union[str, Any] ) -> Any:
# Initialize feature_extractor
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
_snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_snake_case = feature_extractor(
A__ , return_tensors='''np''' , sampling_rate=44100 , mask_audio=A__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case = np.asarray(A__ )
_snake_case = feature_extractor(A__ , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def UpperCamelCase_ ( self : List[str] ) -> Optional[Any]:
_snake_case = self._load_datasamples(1 )
_snake_case = TvltFeatureExtractor()
_snake_case = feature_extractor(A__ , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_snake_case = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A__ , atol=1e-4 ) )
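        # Shape convention exercised above (reading the assertions back): the extractor
        # returns (batch, num_audio_channels, time_frames, feature_size) with time_frames
        # capped at spectrogram_length - hence the (1, 1, 192, 128) expectation for the
        # single librispeech sample.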
| 278 | 0 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Convert plaintext into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """Convert Morse code back into plaintext."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
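# Usage sketch (assumes this file is transformers/models/megatron_bert/__init__.py):
# importing the package is cheap; the attribute access below is what triggers the
# deferred import of the torch-heavy configuration/modeling submodules.
#
#     from transformers.models.megatron_bert import MegatronBertConfig
#     config = MegatronBertConfig()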
| 329 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # save/load round trips are exercised by check_over_configs above
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when the caller did not pass one in,
        # so tests like test_switch can hand over a pre-configured instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives identical results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
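# Standalone sampling sketch with DEISMultistepScheduler (illustrative; the zero
# "noise prediction" below stands in for a real denoising model):
#
#     import torch
#     from diffusers import DEISMultistepScheduler
#
#     scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample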
| 294 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 248 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital number formed as a concatenated product."""
    # 4-digit base: concat(base, 2 * base) == base * 100002
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    # 3-digit base: concat(base, 2 * base, 3 * base) == base * 1002003
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
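# Worked illustration of the concatenation identity used above: for base 192,
# concat(192, 384, 576) = 192384576 = 192 * 1_000_000 + 384 * 1_000 + 576
#                       = 192 * (1_000_000 + 2 * 1_000 + 3) = 192 * 1002003.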
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
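# To turn the verified logits into a human-readable label (sketch reusing the
# `model` and `outputs` names from the test above):
#
#     predicted_id = outputs.logits.argmax(-1).item()
#     print(model.config.id2label[predicted_id])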
| 366 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original checkpoint's weights into our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
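# Example invocation (assuming this script is saved as
# convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py; the dump
# path is a placeholder):
#
#     python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#         --pytorch_dump_folder_path ./table-transformer-detection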
| 67 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount places."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
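# Examples (floating-point representation caveats aside):
#     decimal_isolate(35.345, 1)   -> 0.3
#     decimal_isolate(-14.789, 3)  -> -0.789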
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 53 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
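# Example invocation (assuming this script is saved as
# convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py; paths are placeholders):
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_pre_trained.th ./visualbert-vqa-pretrained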
| 215 | 0 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
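# Example: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.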
if __name__ == "__main__":
print(F'{solution() = }')
| 120 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
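# For reference, the CustomConfig / CustomFeatureExtractor pair imported above lives in
# the test fixtures. A minimal sketch of what such a pair looks like (names and
# attributes here are assumptions, not the actual fixture code):
#
# from transformers import PretrainedConfig
# from transformers.feature_extraction_utils import FeatureExtractionMixin
#
# class CustomConfig(PretrainedConfig):
#     model_type = "custom"
#
#     def __init__(self, attribute=1, **kwargs):
#         self.attribute = attribute
#         super().__init__(**kwargs)
#
# class CustomFeatureExtractor(FeatureExtractionMixin):
#     pass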
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False,
            bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
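# The two cache checks above verify a general property of autoregressive decoding:
# feeding tokens one at a time against a KV cache must match a single full forward
# pass. Sketch of the pattern for any Flax causal LM (variables assumed in scope):
#
# full = model(input_ids)[0][:, -1]                       # reference: one full pass
# cache = model.init_cache(batch_size, max_len)           # empty cache
# out = model(input_ids[:, :-1], past_key_values=cache, position_ids=prefix_positions)
# last = model(input_ids[:, -1:], past_key_values=out.past_key_values, position_ids=last_position)
# np.allclose(last[0][:, -1], full, atol=1e-3)            # should hold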
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ALBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
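# A short usage sketch of the class above (requires network access to the Hub):
#
# tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
# enc = tok("Hello world")              # [CLS] ... [SEP] framing via build_inputs_with_special_tokens
# tok.save_pretrained("./albert-tok")   # writes tokenizer.json (and spiece.model when available)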
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
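# For context, the offline tests above exercise a guard of roughly this shape inside
# the datasets network helpers (a sketch under assumption, not the actual implementation):
#
# def _raise_if_offline(url):
#     if datasets.config.HF_DATASETS_OFFLINE:
#         raise OfflineModeIsEnabled(f"Offline mode is enabled, cannot reach {url}")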
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
a_ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
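# A minimal wiring sketch for the helpers above (argument values are assumptions):
#
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ],
# )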
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50,
        initializer_range=0.02, use_labels=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
        image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0,
        initializer_range=1e-10, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
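# A short construction sketch for the composed config above (values are assumptions):
#
# vision = Blip2VisionConfig()
# qformer = Blip2QFormerConfig()
# text = CONFIG_MAPPING["opt"]()
# config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=32)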
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
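# Illustration of add_sub_symbol with hypothetical inputs: word-internal pieces of a
# matched Chinese word are prefixed with "##" so whole-word masking can find them.
#
# add_sub_symbol(["[CLS]", "身", "高", "[SEP]"], {"身高"})
# -> ["[CLS]", "身", "##高", "[SEP]"]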
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
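# Conceptual sketch of the LocalSGD synchronization step (an illustration, not the
# accelerate implementation): instead of all-reducing gradients on every step, workers
# run independently and average *parameters* every `local_sgd_steps` optimizer steps.
# Assumes torch.distributed has been initialized.
import torch.distributed as dist


def average_parameters(model):
    world_size = dist.get_world_size()
    for p in model.parameters():
        dist.all_reduce(p.data, op=dist.ReduceOp.SUM)
        p.data /= world_size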
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase : List[str] = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
UpperCamelCase : Any = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
with LocalSGD(
accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
UpperCamelCase : int = model(**snake_case__ )
UpperCamelCase : Union[str, Any] = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase : Dict = model(**snake_case__ )
UpperCamelCase : Any = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
UpperCamelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , snake_case__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
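# A hedged note on launching this script (the flags mirror the argparse setup above; the
# process count is illustrative):
#
#   accelerate launch --num_processes 2 local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1
#
# `--local_sgd_steps` sets how many local optimizer steps each worker takes between
# parameter synchronizations; larger values mean less communication per optimizer step.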
| 103 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
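# A short usage sketch of the helpers above (the env var names are illustrative):
#
#   os.environ["MY_DEBUG_FLAG"] = "yes"
#   parse_flag_from_env("MY_DEBUG_FLAG")                 # True ("yes" -> 1 via strtobool)
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)  # first env var set to an int >= 0, else 1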
| 63 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/norm/activation layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) used in PSPNet."""

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head, based on FPN and a Pyramid Pooling Module."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head applied to a single feature map."""

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
__A = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
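# A hedged inference sketch for the model above (the checkpoint id is illustrative; any
# UperNet checkpoint from the archive list at the top of this file should work the same way):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape: (batch_size, num_labels, height, width)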
| 177 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the genes list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
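# A hedged example of calling `basic` directly with a smaller target (the exact generation
# and population counts vary run to run because of the randomness):
#
#   gen, pop, best = basic("hello world", list(" dehlorw"), debug=False)
#   assert best == "hello world"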
| 278 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
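# Typical invocation (all paths are illustrative):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_model.ckpt \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin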
| 278 | 1 |
def average_absolute_deviation(nums: list) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 224 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
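# The net effect of the lazy-module pattern above: importing this package stays cheap, and
# the torch-backed classes are only resolved on first attribute access. A short sketch:
#
#   from transformers.models.encodec import EncodecConfig   # config only, no torch required
#   from transformers.models.encodec import EncodecModel    # resolved lazily, requires torch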
| 365 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
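# With this conftest on the path, a run that exercises the reporting hook above might look
# like the following (the report id is arbitrary):
#
#   python -m pytest tests/ --make-reports=my_test_run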
| 294 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor wrapping an image processor and a BERT tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
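# A hedged usage sketch for the processor above (the checkpoint id is illustrative):
#
#   from transformers import FlavaProcessor
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")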
| 60 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
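# A short usage sketch of the classes above:
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)       # appends at the tail
#   print(linked_list)                  # "1 2 3"
#   linked_list.delete_value(2)
#   print(linked_list)                  # "1 3"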
| 149 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """
    Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
    """
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 360 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
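# A hedged note on running these tests locally (the file path is illustrative):
#
#   RUN_SLOW=1 python -m pytest tests/models/test_models_unet_2d.py -k "UNetLDM"
#
# The `@slow`-decorated tests above download real checkpoints and are skipped unless the
# RUN_SLOW environment variable is set.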
| 109 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a projection of the last hidden states.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512,
        pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
| 232 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 156 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))

        outputs = text_generator('This is a test', return_full_text=False)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])

        text_generator = pipeline(task='text-generation', model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])

        outputs = text_generator('This is a test', return_full_text=True)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))

        outputs = text_generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                    [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}],
                ],
            )

        # Mutually exclusive output options must raise.
        with self.assertRaises(ValueError):
            outputs = text_generator('test', return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator('test', return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator('test', return_text=True, return_tensors=True)

        # Empty prompt is slightly special: it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it works even
        # without BOS; same for Git.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or 'Pegasus' in tokenizer.__class__.__name__
            or 'Git' in model.__class__.__name__
        ):
            outputs = text_generator('')
            self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator('')

        if text_generator.framework == 'tf':
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite-range models: they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings
        # which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator('This is a test' * 500, max_new_tokens=20)

            outputs = text_generator('This is a test' * 500, handle_long_generation='hole', max_new_tokens=20)
            # The hole strategy cannot work once max_new_tokens alone exceeds model_max_length.
            with self.assertRaises(ValueError):
                text_generator(
                    'This is a test' * 500,
                    handle_long_generation='hole',
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
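
        # Note on the 'hole' strategy: it truncates the left side of an over-long
        # prompt so that `max_new_tokens` still fits within `model_max_length`; when
        # `max_new_tokens` alone exceeds the model maximum, no amount of truncation
        # can help, hence the ValueError asserted above.
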
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model='hf-internal-testing/tiny-random-bloom',
            model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe('This is a test')
        self.assertEqual(
            out,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent to the
        # model as they're unlikely to mean anything else.)
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe('This is a test')
        self.assertEqual(
            out,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check:
        # https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto')
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe('This is a test')
        self.assertEqual(
            out,
            [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device=0, torch_dtype=torch.float16)
        pipe('This is a test')
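
        # Loading in float16 roughly halves GPU memory versus float32; the single
        # call above is only a smoke test that generation runs without dtype errors.
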
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.float16)
        pipe('This is a test', do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = 'Hello world'
        text_generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
        if text_generator.model.framework == 'tf':
            logger = logging.get_logger('transformers.generation.tf_utils')
        else:
            logger = logging.get_logger('transformers.generation.utils')
        logger_msg = 'Both `max_new_tokens`'  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
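

# A minimal usage sketch (not part of the test class above): how the text-generation
# pipeline exercised by these tests is typically driven. The tiny model name comes
# from the tests themselves; `max_new_tokens=5` and greedy decoding are illustrative
# assumptions, not values taken from the tests.
if __name__ == '__main__':
    from transformers import pipeline

    generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
    result = generator('Hello world', max_new_tokens=5, do_sample=False)
    print(result[0]['generated_text'])  # the prompt plus up to 5 greedily decoded tokens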
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = 'canine'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        # CANINE uses Unicode private-use codepoints as its BOS/CLS and EOS/SEP markers.
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,  # Good TPU/XLA memory alignment.
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
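

# A minimal usage sketch, kept in comments because this module uses relative imports
# and is normally imported through transformers rather than run directly. It assumes
# the standard transformers API; weights are randomly initialized, not pretrained:
#
#     from transformers import CanineConfig, CanineModel
#     config = CanineConfig()              # defaults mirror the google/canine-s architecture
#     model = CanineModel(config)          # randomly initialized weights
#     assert model.config.model_type == 'canine'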
| 200 | 0 |